Example 18-6: Complete Driver for Hypothetical GIO Device
/* Source for a hypothetical GIO board device; it can be compiled for
 * devices that support DMA (with or without scatter/gather support),
 * or for PIO mode only. This version is designed for IRIX 6.2 or later.
 * Dave Olson, 5/93. 6.2 port by Dave Cortesi 9/95.
 */
/* Compilation: Define the environment variable CPUBOARD as IP20, IP22,
 * or IP26 (the only GIO platforms). Then include the build rules from
 * /var/sysgen/Makefile.kernio to set $CFLAGS including:
 *  # _K32U32   kernel in 32 bit mode running only 32 bit binaries
 *  # _K64U64   kernel in 64 bit mode running 32/64 bit binaries (IP26)
 *  # -DR4000   R4000 machine (IP20, IP22)
 *  # -DTFP     R8000 machine (IP26)
 *  # -G 8      global pointer set to 8 (GIO drivers cannot be loadable)
 *  # -elf      produce an elf executable
 */
/* The following definitions choose between PIO and DMA supporting
 * boards, and if DMA is supported, whether hardware scatter/gather
 * is supported.
 */
#define GBD_NODMA        0   /* non-zero for PIO version of driver */
#define GBD_NUM_DMA_PGS  8   /* 0 for no hardware scatter/gather
                              * support, else number of pages of
                              * scatter/gather per request */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/cred.h>
#include <sys/uio.h>
#include <sys/ddi.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/edt.h>
#include <sys/conf.h>         /* for flags D_MP */

/* gbd (for Gio BoarD) is the driver prefix, specified in the
 * file /var/sysgen/master.d/gbd and in VECTOR module=gbd lines.
 * This driver is multiprocessor-safe (even though no GIO platform
 * is a multiprocessor).
 */
int gbddevflags = D_MP;

/* These defines and structures defining the (hypothetical) hardware
 * interface would normally be in a separate header file.
 */
#define GBD_BOARD_ID  0x75
#define GBD_MASK      0xff   /* use 0xff if using only first byte
                              * of ID word, use 0xffff if using
                              * whole ID word */
#define GBD_MEMSIZE   0x8000

/* command definitions */
#define GBD_GO        1
/* state definitions */
#define GBD_SLEEPING  1
#define GBD_DONE      2
/* direction of DMA definitions */
#define GBD_READ      0
#define GBD_WRITE     1
/* status defines */
#define GBD_INTR_PEND 0x80

/* device register interface to the board */
typedef struct gbd_device {
    __uint32_t command;
    __uint32_t count;
    __uint32_t direction;
    __uint32_t offset;
    __uint32_t status;         /* errors, interrupt pending, etc. */
#if (!GBD_NODMA)               /* if hardware DMA */
#if (GBD_NUM_DMA_PGS)          /* if hardware scatter/gather */
    /* board register points to array of GBD_NUM_DMA_PGS target
     * addresses in board memory. Board can relocate the array
     * by changing the content of sgregisters.
     */
    volatile paddr_t *sgregisters;
#else                          /* dma to contiguous segment only */
    paddr_t startaddr;
#endif
#endif
} gbd_regs;

static struct gbd_info {
    gbd_regs *gbd_device;      /* ->board regs */
    char     *gbd_memory;      /* ->on-board memory */
    sema_t    use_lock;        /* upper-half exclusion from board */
    lock_t    reg_lock;        /* spinlock for interrupt exclusion */
#if GBD_NODMA
    int       gbd_state;       /* transfer state of PIO driver */
    sv_t      intr_wait;       /* sync var for waiting on intr */
#else                          /* DMA supported somehow */
    buf_t    *curbp;           /* current buf struct */
#if (0 == GBD_NUM_DMA_PGS)     /* software scatter/gather */
    caddr_t   curaddr;         /* current address to transfer */
    int       curcount;        /* count being transferred */
    int       totcount;        /* total size this transfer */
#endif
#endif
} gbd_globals[2];

void gbdintr(int, struct eframe_s *);

/* Early device table initialization routine. Validate the values
 * from a VECTOR line and save in the per-device info structure.
 */
void gbdedtinit(register edt_t *e)
{
    int slot;                  /* which slot this device is in */
    __uint32_t val = 0;        /* board ID value */
    register struct gbd_info *inf;

    /* Check to see if the device is present */
    if (!badaddr(e->e_base, sizeof(__uint32_t)))
        val = *(__uint32_t *)(e->e_base);
    if ((val & GBD_MASK) != GBD_BOARD_ID) {
        if (showconfig) {
            cmn_err(CE_CONT, "gbdedtinit: board not installed.");
        }
        return;
    }
    /* figure out slot from VECTOR base= value */
    if (e->e_base == (caddr_t)0xBF400000)
        slot = GIO_SLOT_0;
    else if (e->e_base == (caddr_t)0xBF600000)
        slot = GIO_SLOT_1;
    else {
        cmn_err(CE_NOTE, "ERROR from edtinit: Bad base address %x\n",
                e->e_base);
        return;
    }
#if IP20
    /* for Indigo R4000, set up board as a realtime bus master */
    setgioconfig(slot, GIO64_ARB_EXP0_RT | GIO64_ARB_EXP0_MST);
#endif
#if (IP22 | IP26)
    /* for Indigo2, set up as a pipelined, realtime bus master */
    setgioconfig(slot, GIO64_ARB_EXP0_RT | GIO64_ARB_EXP0_MST);
#endif
    /* Initialize the per-device (per-slot) info, including the
     * device addresses from the edt_t.
     */
    inf = &gbd_globals[(slot == GIO_SLOT_0) ? 0 : 1];
    inf->gbd_device = (struct gbd_device *)e->e_base;
    inf->gbd_memory = (char *)e->e_base2;
    initsema(&inf->use_lock, 1);
    spinlock_init(&inf->reg_lock, NULL);
    setgiovector(GIO_INTERRUPT_1, slot, gbdintr, 0);
    if (showconfig) {
        cmn_err(CE_CONT, "gbdedtinit: board %x installed\n", e->e_base);
    }
}

/* OPEN: minor number used to select slot. Merely test that
 * the device was initialized.
 */
/* ARGSUSED */
gbdopen(dev_t *devp, int flag, int otyp, cred_t *crp)
{
    if (!(gbd_globals[geteminor(*devp) & 1].gbd_device))
        return ENXIO;          /* board not present */
    return 0;                  /* OK */
}

/* CLOSE: Nothing to do. */
/* ARGSUSED */
gbdclose(dev_t dev, int flag, int otyp, cred_t *crp)
{
    return 0;
}

#if (GBD_NODMA)   /***** Non-DMA, therefore character, device ******/

/* WRITE: for character device using PIO */
/* READ entry point same except for direction of transfer */
int gbdwrite(dev_t dev, uio_t *uio)
{
    int unit = geteminor(dev) & 1;
    struct gbd_info *inf = &gbd_globals[unit];
    int size, err = 0, lk;

    /* Exclude any other top-half (read/write) user */
    psema(&inf->use_lock, PZERO);
    /* while there is data to transfer */
    while ((size = uio->uio_resid) > 0) {
        /* Transfer no more than GBD_MEMSIZE bytes per operation */
        size = (size < GBD_MEMSIZE) ? size : GBD_MEMSIZE;
        /* Copy data from user-process memory to board memory.
         * uiomove() updates uio fields and copies data; it
         * returns nonzero on error, in which case we quit.
         */
        if ((err = uiomove(inf->gbd_memory, size, UIO_WRITE, uio)) != 0)
            break;
        /* Block out the interrupt handler with a spinlock, then
         * program the device to start the transfer.
         */
        lk = mutex_spinlock(&inf->reg_lock);
        inf->gbd_device->count = size;
        inf->gbd_device->command = GBD_GO;
        inf->gbd_state = GBD_INTR_PEND;    /* validate an interrupt */
        /* Give up the spinlock and sleep until gbdintr() signals */
        sv_wait(&inf->intr_wait, PZERO, &inf->reg_lock, lk);
    } /* while(size) */
    vsema(&inf->use_lock);     /* let another process use board */
    return err;
}

/* INTERRUPT: for PIO-only board */
/* ARGSUSED1 */
void gbdintr(int unit, struct eframe_s *ef)
{
    register struct gbd_info *inf = &gbd_globals[unit];
    int lk;

    /* get exclusive use of device regs from upper-half */
    lk = mutex_spinlock(&inf->reg_lock);
    /* if the interrupt is not from our device, ignore it */
    if (inf->gbd_device->status & GBD_INTR_PEND) {
        /* MISSING: test device status, clean up after interrupt,
         * post errors into inf->gbd_state for upper-half to see.
         */
        /* Provided the upper-half expected this, wake it up */
        if (inf->gbd_state & GBD_INTR_PEND)
            sv_signal(&inf->intr_wait);
    }
    mutex_spinunlock(&inf->reg_lock, lk);
}

#else   /******** DMA version of driver ************/

void gbd_strategy(struct buf *);

/* WRITE entry point (for character driver of DMA board).
 * Call uiophysio() to set up and call the gbd_strategy routine,
 * where the transfer is actually done.
 */
int gbdwrite(dev_t dev, uio_t *uiop)
{
    return uiophysio((int (*)())gbd_strategy, 0, dev, B_WRITE, uiop);
}
/* READ entry point same except for direction of transfer */

#if GBD_NUM_DMA_PGS > 0
/* STRATEGY for hardware scatter/gather DMA support.
 * Called from gbdwrite()/gbdread() via physio().
 * Called from file-system/paging code directly.
 */
void gbd_strategy(register struct buf *bp)
{
    int unit = geteminor(bp->b_edev) & 1;
    register struct gbd_info *inf = &gbd_globals[unit];
    register gbd_regs *regs = inf->gbd_device;
    volatile paddr_t *sgregisters;
    int npages;
    int i, lk;
    caddr_t v_addr;

    /* Get the kernel virtual address of the data. Note that
     * b_dmaaddr is NULL when the BP_ISMAPPED(bp) macro
     * indicates false; in that case, the field bp->b_pages
     * is a pointer to a linked list of pfdat structure
     * pointers; that saves creating a virtual mapping and
     * then decoding that mapping back to physical addresses.
     * BP_ISMAPPED will never be false for character devices,
     * only block devices.
     */
    if (!BP_ISMAPPED(bp)) {
        cmn_err(CE_WARN, "gbd driver can't handle unmapped buffers");
        bp->b_flags |= B_ERROR;
        iodone(bp);
        return;
    }
    v_addr = bp->b_dmaaddr;
    /* Compute the number of pages affected by this request.
     * The numpages() macro (sysmacros.h) returns the number of pages
     * that span a given length starting at a given address, allowing
     * for partial pages. Unrealistically, we limit this to the
     * number of scatter/gather registers on the board.
     * Note that this sample driver doesn't handle the case of
     * requests larger than the number of registers!
     */
    npages = numpages(v_addr, bp->b_bcount);
    if (npages > GBD_NUM_DMA_PGS) {
        bp->b_resid = IO_NBPP * (npages - GBD_NUM_DMA_PGS);
        npages = GBD_NUM_DMA_PGS;
        cmn_err(CE_WARN, "request too large, only %d pages max", npages);
    }
    /* Get exclusive upper-half use of device. The sema is released
     * wherever iodone() is called, here or in the int handler.
     */
    psema(&inf->use_lock, PZERO);
    inf->curbp = bp;
    /* Get exclusive use of the device regs, blocking the int handler */
    lk = mutex_spinlock(&inf->reg_lock);
    /* MISSING: set up board to transfer npages discrete segments. */
    /* Get address of the scatter-gather registers */
    sgregisters = regs->sgregisters;
    /* Provide the beginning byte offset and count to the device. */
    regs->offset = io_poff(bp->b_dmaaddr);    /* in immu.h */
    regs->count = (IO_NBPP - inf->gbd_device->offset)
                  + (npages - 1) * IO_NBPP;
    /* Translate the virtual address of each page to a
     * physical page number and load it into the next
     * scatter-gather register. The btoct() macro
     * converts the byte value to a page value after
     * rounding down the byte value to a full page.
     */
    for (i = 0; i < npages; i++) {
        *sgregisters++ = btoct(kvtophys(v_addr));
        v_addr += IO_NBPP;
    }
    if ((bp->b_flags & B_READ) == 0)
        regs->direction = GBD_WRITE;
    else
        regs->direction = GBD_READ;
    regs->command = GBD_GO;    /* start DMA */
    /* release use of the device regs to the interrupt handler */
    mutex_spinunlock(&inf->reg_lock, lk);
    /* and return; upper layers of kernel wait for iodone(bp) */
}

/* INTERRUPT: for hardware DMA support.
 * This is over-simplified because the above strategy routine never
 * accepts a transfer larger than the device can handle in a single
 * operation.
 */
/* ARGSUSED1 */
void gbdintr(int unit, struct eframe_s *ef)
{
    register struct gbd_info *inf = &gbd_globals[unit];
    register gbd_regs *regs = inf->gbd_device;
    int error = 0;
    int lk;

    /* get exclusive use of device regs from upper-half */
    lk = mutex_spinlock(&inf->reg_lock);
    /* If the interrupt was not from this device, exit quickly */
    if (!(regs->status & GBD_INTR_PEND)) {
        mutex_spinunlock(&inf->reg_lock, lk);
        return;
    }
    /* MISSING: read board registers, clear interrupt,
     * and note any errors in the "error" variable.
     */
    if (error)
        inf->curbp->b_flags |= B_ERROR;
    /* release lock on exclusive use of device regs */
    mutex_spinunlock(&inf->reg_lock, lk);
    /* wake up any kernel/file-system code waiting for this I/O */
    iodone(inf->curbp);
    /* unlock use of device to other upper-half driver code */
    vsema(&inf->use_lock);
}

#else   /****** GBD_NUM_DMA_PGS == 0; no hardware scatter/gather ******/

/* STRATEGY: for software-controlled scatter/gather.
 * Called from the gbdwrite() routine via uiophysio().
 */
void gbd_strategy(struct buf *bp)
{
    int unit = geteminor(bp->b_edev) & 1;
    register struct gbd_info *inf = &gbd_globals[unit];
    register gbd_regs *regs = inf->gbd_device;
    int lk;

    /* Get the kernel virtual address of the data; note that
     * b_dmaaddr may be NULL if the BP_ISMAPPED(bp) macro
     * indicates false; in that case, the field bp->b_pages
     * is a pointer to a linked list of pfdat structure
     * pointers; that saves creating a virtual mapping and
     * then decoding that mapping back to physical addresses.
     * BP_ISMAPPED will never be false for character devices,
     * only block devices.
     */
    if (!BP_ISMAPPED(bp)) {
        cmn_err(CE_WARN, "gbd driver can't handle unmapped buffers");
        bp->b_flags |= B_ERROR;
        iodone(bp);
        return;
    }
    /* Get exclusive upper-half use of device. The sema is released
     * wherever iodone() is called, here or in the int handler.
     */
    psema(&inf->use_lock, PZERO);
    inf->curbp = bp;
    /* Initialize the current transfer address and count.
     * The first transfer should finish the rest of the
     * page, but do no more than the total byte count.
     */
    inf->curaddr = bp->b_dmaaddr;
    inf->totcount = bp->b_bcount;
    inf->curcount = IO_NBPP - io_poff(inf->curaddr);
    if (bp->b_bcount < inf->curcount)
        inf->curcount = bp->b_bcount;
    /* Get exclusive use of the device regs and start the transfer
     * of the first/only segment of data.
     */
    lk = mutex_spinlock(&inf->reg_lock);
    regs->startaddr = kvtophys(inf->curaddr);
    regs->count = inf->curcount;
    regs->direction = (bp->b_flags & B_READ) ? GBD_READ : GBD_WRITE;
    regs->command = GBD_GO;    /* start DMA */
    /* release use of the device regs to the interrupt handler */
    mutex_spinunlock(&inf->reg_lock, lk);
    /* and return; upper layers of kernel wait for iodone(bp) */
}

/* INTERRUPT: for software scatter/gather. This version is more typical
 * of boards that do have DMA, and more typical of devices that support
 * block i/o, as opposed to character i/o.
 */
/* ARGSUSED1 */
void gbdintr(int unit, struct eframe_s *ef)
{
    register struct gbd_info *inf = &gbd_globals[unit];
    register gbd_regs *regs = inf->gbd_device;
    register buf_t *bp = inf->curbp;
    int error = 0;
    int lk;

    /* get exclusive use of device regs from upper-half */
    lk = mutex_spinlock(&inf->reg_lock);
    /* If the interrupt was not from this device, exit quickly */
    if (!(regs->status & GBD_INTR_PEND)) {
        mutex_spinunlock(&inf->reg_lock, lk);
        return;
    }
    /* MISSING: read board registers, clear interrupt,
     * and note any errors in the "error" variable.
     */
    if (error) {
        bp->b_resid = inf->totcount;    /* show bytes undone */
        bp->b_flags |= B_ERROR;         /* flag error in transfer */
        iodone(bp);                     /* we are done, tell upper layers */
        vsema(&inf->use_lock);          /* make device available */
    } else {
        /* Note the successful transfer of one segment. */
        inf->curaddr += inf->curcount;
        inf->totcount -= inf->curcount;
        if (inf->totcount <= 0) {
            iodone(bp);                 /* we are done, tell upper layers */
            vsema(&inf->use_lock);      /* make device available */
        } else {
            /* More data to transfer. Reprogram the board for
             * the next segment and start the next DMA.
             */
            inf->curcount = (inf->totcount < IO_NBPP)
                            ? inf->totcount : IO_NBPP;
            regs->startaddr = kvtophys(inf->curaddr);
            regs->count = inf->curcount;
            regs->direction = (bp->b_flags & B_READ) ? GBD_READ : GBD_WRITE;
            regs->command = GBD_GO;     /* start next DMA */
        }
    }
    /* release lock on exclusive use of device regs */
    mutex_spinunlock(&inf->reg_lock, lk);
}
#endif /* GBD_NUM_DMA_PGS */
#endif /* GBD_NODMA */
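The driver above is exercised through its character special file: a write() from a user process enters gbdwrite() and, in the DMA configurations, reaches gbd_strategy() via uiophysio(). As a rough illustration only (not part of Example 18-6), the following minimal user-level program opens such a node and issues a single write. The node name /dev/gbd0 and the 4 KB transfer size are assumptions for this sketch; the actual special file must be created to match the major and minor numbers configured for gbd in /var/sysgen/master.d and the generated kernel.

/* Hypothetical user-level test for the gbd character device.
 * The path "/dev/gbd0" is an assumed name for a node whose minor
 * number selects GIO slot 0; adjust to the installed configuration.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];            /* one page-sized test transfer */
    int fd;
    ssize_t n;

    memset(buf, 0xA5, sizeof(buf));     /* arbitrary test pattern */
    fd = open("/dev/gbd0", O_WRONLY);
    if (fd < 0) {
        perror("open /dev/gbd0");       /* e.g. ENXIO from gbdopen() */
        return 1;
    }
    n = write(fd, buf, sizeof(buf));    /* drives gbdwrite() */
    if (n < 0)
        perror("write");
    else
        printf("wrote %ld bytes\n", (long)n);
    close(fd);
    return (n == (ssize_t)sizeof(buf)) ? 0 : 1;
}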