diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 211e5b0..e5bb922 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -25,7 +25,7 @@ ffb-objs := ffb_drv.o ffb_context.o
 savage-objs := savage_drv.o savage_bci.o savage_state.o
 via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
 		via_video.o via_dmablit.o
-mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
+mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o mach64_dmablit.o
 nv-objs := nv_drv.o
 
 ifeq ($(CONFIG_COMPAT),y)
diff --git a/linux-core/mach64_dmablit.c b/linux-core/mach64_dmablit.c
new file mode 100644
index 0000000..f5d7c56
--- /dev/null
+++ b/linux-core/mach64_dmablit.c
@@ -0,0 +1,478 @@
+/* mach64_dmablit.c -- PCI DMA BitBlt support for mach64 (Rage Pro) driver
+ *
+ * Copyright (C) 2006 George Sapountzis, All Rights Reserved.
+ *
+ * Based on via_dmablit.c which is:
+ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    George Sapountzis
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "mach64_drm.h"
+#include "mach64_drv.h"
+
+
+#define MACH64_PFN(x)   ((unsigned long)(x) >> PAGE_SHIFT)
+#define MACH64_PGDN(x)  (((unsigned long)(x)) & PAGE_MASK)
+#define MACH64_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
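+
+/* For example, with 4 KB pages (PAGE_SHIFT == 12; illustrative values only):
+ *
+ *      MACH64_PFN(0x12345678)   == 0x12345     -- page frame number
+ *      MACH64_PGDN(0x12345678)  == 0x12345000  -- page-aligned base
+ *      MACH64_PGOFF(0x12345678) == 0x678       -- offset within the page
+ */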
+
+
+typedef struct _drm_mach64_sg_info {
+        enum dma_data_direction direction;
+
+        struct page **pages;
+        unsigned long num_pages;
+
+        dma_addr_t *dma_addrs;
+
+        unsigned long num_desc;
+
+        enum {
+                dr_mach64_device_mapped,
+                dr_mach64_pages_locked,
+                dr_mach64_pages_alloc,
+                dr_mach64_sg_init
+        } state;
+} drm_mach64_sg_info_t;
+
+
+/* Lock all pages into system memory and obtain a page pointer array. A
+ * segmentation violation will occur here if the calling user does not have
+ * access to the submitted address.
+ */
+static int mach64_lock_dma_pages(drm_mach64_sysblit_t * xfer,
+                                 drm_mach64_sg_info_t * vsg)
+{
+        int ret;
+        unsigned long first_pfn;
+        unsigned long last_pfn;
+
+        first_pfn = MACH64_PFN(xfer->mem_addr);
+        last_pfn = MACH64_PFN(xfer->mem_addr + xfer->h * xfer->mem_pitch - 1);
+
+        vsg->num_pages = last_pfn - first_pfn + 1;
+
+        vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages);
+        if (vsg->pages == NULL)
+                return DRM_ERR(ENOMEM);
+        memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
+
+        vsg->state = dr_mach64_pages_alloc;
+
+        /* Map user pages to kernel, see LDD3, ch.15, p.436 */
+        down_read(&current->mm->mmap_sem);
+        ret = get_user_pages(current, current->mm,
+                             (unsigned long) xfer->mem_addr, vsg->num_pages,
+                             (vsg->direction == DMA_FROM_DEVICE), 0,
+                             vsg->pages, NULL);
+        up_read(&current->mm->mmap_sem);
+
+        if (ret != vsg->num_pages) {
+                if (ret < 0)
+                        return ret;
+                vsg->state = dr_mach64_pages_locked;
+                return DRM_ERR(EINVAL);
+        }
+
+        vsg->state = dr_mach64_pages_locked;
+
+        return 0;
+}
+
+
+static void mach64_release_dma_pages(drm_mach64_sg_info_t * vsg)
+{
+        int i;
+
+        for (i = 0; i < vsg->num_pages; ++i) {
+                struct page *page = vsg->pages[i];
+                if (page == NULL)
+                        continue;
+
+                if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+                        SetPageDirty(page);
+                page_cache_release(page);
+        }
+}
+
+
+/* Set up a streaming DMA mapping: map user pages to DMA capable memory and
+ * fill in a page table from logical to bus addresses. Map whole pages to
+ * avoid coherence problems with partial mappings of cache lines; the
+ * overhead of this decision has not been measured.
+ */
+static int mach64_map_blit_for_device(struct pci_dev * pdev,
+                                      const drm_mach64_sysblit_t * xfer,
+                                      drm_mach64_sg_info_t * vsg)
+{
+        int i;
+
+        /* Allocate logical to bus address translation table */
+        vsg->dma_addrs = vmalloc(sizeof(dma_addr_t) * vsg->num_pages);
+        if (vsg->dma_addrs == NULL)
+                return DRM_ERR(ENOMEM);
+        memset(vsg->dma_addrs, 0, sizeof(dma_addr_t) * vsg->num_pages);
+
+        /* Map whole pages, see LDD3, ch.15, p.450 */
+        for (i = 0; i < vsg->num_pages; ++i) {
+                vsg->dma_addrs[i] = dma_map_page(&pdev->dev, vsg->pages[i],
+                                                 0, PAGE_SIZE, vsg->direction);
+        }
+
+        vsg->state = dr_mach64_device_mapped;
+
+        return 0;
+}
+
+
+static void mach64_unmap_blit_from_device(struct pci_dev * pdev,
+                                          drm_mach64_sg_info_t * vsg)
+{
+        int i;
+
+        for (i = 0; i < vsg->num_pages; ++i) {
+                dma_unmap_page(&pdev->dev, vsg->dma_addrs[i],
+                               PAGE_SIZE, vsg->direction);
+        }
+
+        vfree(vsg->dma_addrs);
+}
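+
+
+/* Each bus-master descriptor written by mach64_add_desc_entry() below
+ * occupies four little-endian DWORDs, indexed by the MACH64_DMA_* offsets.
+ * Viewed as a struct (illustrative only):
+ *
+ *      struct mach64_dma_desc {
+ *              u32 frame_buf_offset;   -- byte offset in the FB aperture
+ *              u32 sys_mem_addr;       -- bus address of the data chunk
+ *              u32 command;            -- chunk length in bytes, ORed with
+ *                                         MACH64_DMA_EOL on the last entry
+ *              u32 reserved;
+ *      };
+ *
+ * The transfer direction is not stored per descriptor; it is selected
+ * globally by the BM_SYSTEM_TABLE write in mach64_fire_dmablit().
+ */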
+#define mach64_add_desc_entry(_table)                                   \
+do {                                                                    \
+        unsigned long mem_pfn = MACH64_PFN(xfer->mem_addr);             \
+        unsigned long cur_pfn = MACH64_PFN(cur_mem);                    \
+        dma_addr_t cur_dma;                                             \
+        unsigned eol;                                                   \
+                                                                        \
+        cur_dma = vsg->dma_addrs[cur_pfn - mem_pfn] + MACH64_PGOFF(cur_mem); \
+                                                                        \
+        eol = (num_desc == vsg->num_desc - 1) ? MACH64_DMA_EOL : 0x0;   \
+                                                                        \
+        _table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(cur_fb);      \
+        _table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(cur_dma);         \
+        _table[MACH64_DMA_COMMAND] = cpu_to_le32(page_len | eol);       \
+        _table[MACH64_DMA_RESERVED] = 0;                                \
+} while (0)
+
+
+/* Fill in the DMA descriptor table. Each table entry contains the framebuffer
+ * offset and the bus address of the data to be transferred. Memory lines that
+ * span multiple virtual pages get multiple DMA descriptors, one for each page
+ * they span, because per-descriptor data chunks must be contiguous in physical
+ * memory.
+ *
+ * If `fill_flag' is not set, only count how many descriptors are needed.
+ */
+static void mach64_fill_desc_tbl(drm_device_t * dev,
+                                 const drm_mach64_sysblit_t * xfer,
+                                 drm_mach64_sg_info_t * vsg, int fill_flag)
+{
+        drm_mach64_private_t *dev_priv = dev->dev_private;
+
+        unsigned char *mem_addr = xfer->mem_addr;
+        unsigned char *cur_mem;
+        uint32_t fb_addr = xfer->fb_addr;
+        uint32_t cur_fb;
+        unsigned long line_len;
+        unsigned page_len;
+        int num_desc = 0;
+        int cur_line;
+
+        uint32_t *table = (uint32_t *) dev_priv->ring.start;
+
+        for (cur_line = 0; cur_line < xfer->h; ++cur_line) {
+                line_len = xfer->w_bytes;
+                cur_mem = mem_addr;
+                cur_fb = fb_addr;
+
+                while (line_len > 0) {
+                        page_len = min(PAGE_SIZE - MACH64_PGOFF(cur_mem),
+                                       line_len);
+                        line_len -= page_len;
+
+                        if (fill_flag) {
+                                mach64_add_desc_entry(table);
+                                table += 4;
+                        }
+
+                        num_desc++;
+                        cur_mem += page_len;
+                        cur_fb += page_len;
+                }
+
+                mem_addr += xfer->mem_pitch;
+                fb_addr += xfer->fb_pitch;
+        }
+
+        if (!fill_flag)
+                vsg->num_desc = num_desc;
+}
+
+
+static void mach64_fire_dmablit(drm_device_t * dev,
+                                drm_mach64_sg_info_t * vsg)
+{
+        drm_mach64_private_t *dev_priv = dev->dev_private;
+        int direction = (vsg->direction == DMA_TO_DEVICE) ? 0x0 : 0x1;
+
+        /* mach64_ring_start() */
+
+        if (mach64_do_wait_for_idle(dev_priv) < 0) {
+                mach64_do_engine_reset(dev_priv);
+        }
+
+        /* enable bus mastering and block 1 registers */
+        MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
+                     & ~MACH64_BUS_MASTER_DIS) | MACH64_BUS_EXT_REG_EN);
+        mach64_do_wait_for_idle(dev_priv);
+
+        /* reset descriptor table head and kick off the transfer */
+        MACH64_WRITE(MACH64_BM_SYSTEM_TABLE,
+                     dev_priv->ring.start_addr | direction);
+}
+
+
+/* FIXME:
+ * From the point of view of the DRM device, the dmablit ioctl shares the bus
+ * mastering engine with the existing ioctls. However, no proper locking is
+ * implemented at the DRM device level; access to the bus mastering engine is
+ * coordinated at the Xserver/DRI level by the DRI lock:
+ * - the existing ioctls are only used by the DRI
+ * - the dmablit ioctl is only used by the Xserver
+ * - the Xserver calls mach64_dma_idle before accessing the GPU
+ * - the dmablit ioctl is blocking and uninterruptible
+ *
+ * The dmablit interface is blocking and uninterruptible (the waiting-for-IRQ
+ * part). Fixing this requires making the DRM and the DDX wait_for_idle
+ * functions aware of an ongoing bus mastering operation.
+ */
+static void mach64_dmablit_sync(drm_device_t * dev,
+                                drm_mach64_sg_info_t * vsg)
+{
+        drm_mach64_private_t *dev_priv = dev->dev_private;
+        int bm0, bm1, bmx;
+
+#if 0
+        int status;
+        int i;
+
+        for (i = 0; i < dev_priv->usec_timeout; i++) {
+                status = MACH64_READ(MACH64_BM_STATUS);
+
+                if ((status & 0x24000000) == 0)
+                        break;
+
+                DRM_UDELAY(1);
+        }
+
+        if (i == dev_priv->usec_timeout)
+                DRM_ERROR("timeout\n");
+#endif
+
+#if 0
+        bm0 = MACH64_READ(MACH64_BM_STATUS);
+#endif
+
+        wait_for_completion(&dev_priv->bm_eol_completion);
+
+#if 0
+        bm1 = MACH64_READ(MACH64_BM_STATUS);
+
+        bmx = bm0 ^ bm1;
+
+        if ((bmx & 0xff000000) != 0x24000000)
+                DRM_ERROR("bmx %x - %x %x\n", bmx, bm0, bm1);
+
+        if ((bm1 & 0x24000000))
+                DRM_ERROR("bm1 %x - %x %x\n", bmx, bm0, bm1);
+#endif
+
+        /* mach64_ring_stop() */
+
+        /* disable bus mastering but keep the block 1 registers enabled */
+        mach64_do_wait_for_idle(dev_priv);
+        MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL)
+                     | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN);
+}
+
+
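+/* The cleanup switch below intentionally falls through: the state values are
+ * ordered so that each case releases its own resource and then every resource
+ * acquired before it. A blit that only got its pages partially locked, for
+ * example, enters at dr_mach64_pages_locked, releases the locked pages, and
+ * falls through to free the page pointer array as well.
+ */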
+/* Free up all resources for a blit. It is usable even if the blit info has
+ * only been partially built, as long as the state enum is consistent with the
+ * actual status of the used resources.
+ */
+static void mach64_free_sg_info(struct pci_dev * pdev,
+                                drm_mach64_sg_info_t * vsg)
+{
+        switch (vsg->state) {
+        case dr_mach64_device_mapped:
+                mach64_unmap_blit_from_device(pdev, vsg);
+        case dr_mach64_pages_locked:
+                mach64_release_dma_pages(vsg);
+        case dr_mach64_pages_alloc:
+                vfree(vsg->pages);
+        default:
+                vsg->state = dr_mach64_sg_init;
+        }
+}
+
+
+static int mach64_build_sg_info(drm_device_t * dev, drm_mach64_sg_info_t * vsg,
+                                drm_mach64_sysblit_t * xfer)
+{
+        int ret;
+
+        vsg->state = dr_mach64_sg_init;
+
+        vsg->direction = (xfer->to_fb) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+        if (xfer->h <= 0 || xfer->w_bytes <= 0) {
+                DRM_ERROR("Zero size bitblt.\n");
+                return DRM_ERR(EINVAL);
+        }
+
+        /* We allow a negative fb stride to allow flipping of images in
+         * transfer.
+         */
+        if (xfer->mem_pitch < xfer->w_bytes ||
+            abs(xfer->fb_pitch) < xfer->w_bytes) {
+                DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+                return DRM_ERR(EINVAL);
+        }
+
+        /* The byte alignment of the two addresses, i.e. bits fb_addr[1:0] and
+         * bits mem_addr[1:0], should match.
+         */
+        if (((xfer->fb_addr & 3) != ((unsigned int)xfer->mem_addr & 3)) ||
+            ((xfer->fb_pitch & 3) != (xfer->mem_pitch & 3))) {
+                DRM_ERROR("Byte alignment mismatch of memory and framebuffer"
+                          " addresses.\n");
+                return DRM_ERR(EINVAL);
+        }
+
+        /* Don't lock an arbitrarily large number of pages, since that causes
+         * a DoS security hole. This is not covered by the `num_desc' check
+         * below, which bounds `w_bytes', not `mem_pitch'.
+         */
+        if ((xfer->h > 2048) || (xfer->h * xfer->mem_pitch > 2 * 1024 * 4096)) {
+                DRM_ERROR("Too large PCI DMA bitblt.\n");
+                return DRM_ERR(EINVAL);
+        }
+
+        /* We allow for 2K DMA descriptors, which fit in a table of size 32 KB;
+         * each descriptor consists of 4 DWORDs, i.e. 16 bytes.
+         *
+         * This allows for pixmaps as large as 1024x1024 or transfers with
+         * h=1024, w_bytes=4096. Each descriptor can transfer up to 4 KB and
+         * pages have a minimum size of 4 KB, thus each line can span at most
+         * two pages and each chunk can be served with a single descriptor.
+         * Thus at most 2 descriptors/line for 1K lines.
+         */
+        mach64_fill_desc_tbl(dev, xfer, vsg, 0);
+        if (vsg->num_desc > 2048) {
+                DRM_ERROR("Too many DMA descriptors required.\n");
+                return DRM_ERR(EINVAL);
+        }
+
+        ret = mach64_lock_dma_pages(xfer, vsg);
+        if (ret < 0) {
+                DRM_ERROR("Could not lock DMA pages.\n");
+                mach64_free_sg_info(dev->pdev, vsg);
+                return ret;
+        }
+
+        DRM_DEBUG("(%4u,%4u) num_desc=%4lu num_pages=%4lu\n",
+                  xfer->w_bytes, xfer->h, vsg->num_desc, vsg->num_pages);
+
+        ret = mach64_map_blit_for_device(dev->pdev, xfer, vsg);
+        if (ret < 0) {
+                DRM_ERROR("Could not allocate DMA addresses array.\n");
+                mach64_free_sg_info(dev->pdev, vsg);
+                return ret;
+        }
+
+        mach64_fill_desc_tbl(dev, xfer, vsg, 1);
+
+        mach64_fire_dmablit(dev, vsg);
+
+        mach64_dmablit_sync(dev, vsg);
+
+        return 0;
+}
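+
+/* The complete blit path is thus: count descriptors and validate the request,
+ * pin the user pages, map them for DMA, fill the descriptor table, fire the
+ * engine, and sleep until the BM_EOL interrupt. Despite its name,
+ * mach64_build_sg_info() drives all of these stages; cleanup is left to the
+ * caller via mach64_free_sg_info().
+ */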
+
+
+static int mach64_dma_dispatch_sysblit(drm_device_t * dev,
+                                       drm_mach64_sysblit_t * xfer)
+{
+        drm_mach64_sg_info_t *vsg;
+        int ret;
+
+        vsg = drm_alloc(sizeof(*vsg), DRM_MEM_DRIVER);
+        if (vsg == NULL)
+                return DRM_ERR(ENOMEM);
+
+        ret = mach64_build_sg_info(dev, vsg, xfer);
+        if (ret < 0) {
+                drm_free(vsg, sizeof(*vsg), DRM_MEM_DRIVER);
+                return ret;
+        }
+
+        mach64_free_sg_info(dev->pdev, vsg);
+        drm_free(vsg, sizeof(*vsg), DRM_MEM_DRIVER);
+
+        return 0;
+}
+
+
+int mach64_dma_sysblit(DRM_IOCTL_ARGS)
+{
+        DRM_DEVICE;
+        drm_mach64_private_t *dev_priv = dev->dev_private;
+        drm_mach64_sysblit_t xfer;
+        int ret;
+
+        LOCK_TEST_WITH_RETURN(dev, filp);
+
+        if (!dev_priv) {
+                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+                return DRM_ERR(EINVAL);
+        }
+
+        if (!dev->irq) {
+                DRM_ERROR("%s requires enabling IRQ\n", __FUNCTION__);
+                return DRM_ERR(EINVAL);
+        }
+
+        DRM_COPY_FROM_USER_IOCTL(xfer, (drm_mach64_sysblit_t *) data,
+                                 sizeof(xfer));
+
+        DRM_DEBUG("%s: pid=%d\n",
+                  __FUNCTION__, DRM_CURRENTPID);
+
+        ret = mach64_dma_dispatch_sysblit(dev, &xfer);
+
+        return ret;
+}
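The shared-core changes that follow size the descriptor ring for the new path: 2048 descriptors at 16 bytes each is exactly 32 KB, so mach64_do_dma_init() grows ring.size from 0x4000 (16 KB) to 0x8000 (32 KB), every BM_GUI_TABLE_CMD write switches to MACH64_CIRCULAR_BUF_SIZE_32KB to match, and the bm_eol_completion that mach64_dmablit_sync() sleeps on is initialized at DMA init time.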
diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c
index 3a5fdee..6bd787c 100644
--- a/shared-core/mach64_dma.c
+++ b/shared-core/mach64_dma.c
@@ -190,7 +190,7 @@ static void mach64_ring_reset(drm_mach64
 	ring->space = ring->size;
 
 	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
-		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
+		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_32KB);
 
 	dev_priv->ring_running = 0;
 }
@@ -691,7 +691,7 @@ static int mach64_bm_dma_test(drm_device
 	DRM_DEBUG("starting DMA transfer...\n");
 
 	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
-		     dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
+		     dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_32KB);
 
 	MACH64_WRITE(MACH64_SRC_CNTL,
 		     MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
@@ -892,7 +892,10 @@ static int mach64_do_dma_init(drm_device
 		}
 	}
 
-	dev_priv->ring.size = 0x4000;	/* 16KB */
+	/* setup DMA ring size, logical and physical address; increased to
+	 * 32KB for the dmablit SG list (see mach64_dmablit.c)
+	 */
+	dev_priv->ring.size = 0x8000;	/* 32KB */
 	dev_priv->ring.start = dev_priv->ring_map->handle;
 	dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
 
@@ -947,7 +950,10 @@ static int mach64_do_dma_init(drm_device
 	mach64_do_wait_for_fifo(dev_priv, 1);
 	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
 		     (dev_priv->ring.
-		      head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
+		      head_addr | MACH64_CIRCULAR_BUF_SIZE_32KB));
+
+	/* init bus master end of transfer completion */
+	init_completion(&dev_priv->bm_eol_completion);
 
 	/* init frame counter */
 	dev_priv->sarea_priv->frames_queued = 0;
@@ -1105,7 +1111,7 @@ int mach64_do_dispatch_pseudo_dma(drm_ma
 		return ret;
 	}
 	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
-		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
+		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_32KB);
 
 	DRM_DEBUG("%s completed\n", __FUNCTION__);
 	return 0;
diff --git a/shared-core/mach64_drm.h b/shared-core/mach64_drm.h
index 083f959..3893abb 100644
--- a/shared-core/mach64_drm.h
+++ b/shared-core/mach64_drm.h
@@ -162,6 +162,7 @@
 #define DRM_MACH64_VERTEX	0x05
 #define DRM_MACH64_BLIT		0x06
 #define DRM_MACH64_FLUSH	0x07
 #define DRM_MACH64_GETPARAM	0x08
+#define DRM_MACH64_SYSBLIT	0x09
 
 #define DRM_IOCTL_MACH64_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
@@ -171,7 +172,8 @@ #define DRM_IOCTL_MACH64_CLEAR
 #define DRM_IOCTL_MACH64_VERTEX		DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
 #define DRM_IOCTL_MACH64_BLIT		DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
 #define DRM_IOCTL_MACH64_FLUSH		DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
-#define DRM_IOCTL_MACH64_GETPARAM	DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
+#define DRM_IOCTL_MACH64_GETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
+#define DRM_IOCTL_MACH64_SYSBLIT	DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_SYSBLIT, drm_mach64_sysblit_t)
 
 /* Buffer flags for clears
  */
@@ -253,4 +255,17 @@ typedef struct drm_mach64_getparam {
 	void *value;
 } drm_mach64_getparam_t;
 
+typedef struct drm_mach64_sysblit {
+	unsigned int h;
+	unsigned int w_bytes;
+
+	unsigned int fb_addr;
+	unsigned int fb_pitch;
+
+	char *mem_addr;
+	unsigned int mem_pitch;
+
+	int to_fb;
+} drm_mach64_sysblit_t;
+
 #endif
diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h
index bb8b309..68342f1 100644
--- a/shared-core/mach64_drv.h
+++ b/shared-core/mach64_drv.h
@@ -106,6 +106,8 @@ typedef struct drm_mach64_private {
 	drm_local_map_t *ring_map;
 	drm_local_map_t *dev_buffers;	/* this is a pointer to a structure in dev */
 	drm_local_map_t *agp_textures;
+
+	struct completion bm_eol_completion;	/* bus master end of transfer completion */
 } drm_mach64_private_t;
 
 extern drm_ioctl_desc_t mach64_ioctls[];
@@ -145,6 +147,7 @@ extern int mach64_dma_swap(DRM_IOCTL_ARG
 extern int mach64_dma_vertex(DRM_IOCTL_ARGS);
 extern int mach64_dma_blit(DRM_IOCTL_ARGS);
 extern int mach64_get_param(DRM_IOCTL_ARGS);
+extern int mach64_dma_sysblit(DRM_IOCTL_ARGS);
 extern int mach64_driver_vblank_wait(drm_device_t * dev,
 				     unsigned int *sequence);
 
@@ -617,7 +620,7 @@ static __inline__ void mach64_ring_start
 
 	/* reset descriptor table ring head */
 	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
-		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
+		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_32KB);
 
 	dev_priv->ring_running = 1;
 }
@@ -631,7 +634,7 @@ static __inline__ void mach64_ring_resum
 
 	/* reset descriptor table ring head */
 	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
-		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
+		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_32KB);
 
 	if (dev_priv->driver_mode == MACH64_MODE_MMIO) {
 		mach64_do_dispatch_pseudo_dma(dev_priv);
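The interrupt changes below are the other half of the handshake with mach64_dmablit_sync(); together they form the standard <linux/completion.h> pattern, reduced here to its three calls (using the names this patch introduces):

	init_completion(&dev_priv->bm_eol_completion);	   /* mach64_do_dma_init()   */
	complete(&dev_priv->bm_eol_completion);		   /* IRQ handler, on BM_EOL */
	wait_for_completion(&dev_priv->bm_eol_completion); /* mach64_dmablit_sync()  */

The handler also stops returning IRQ_HANDLED/IRQ_NONE directly and accumulates a handled flag instead, since the VBLANK and BM_EOL sources share one interrupt line.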
diff --git a/shared-core/mach64_irq.c b/shared-core/mach64_irq.c
index 663642d..a450aa0 100644
--- a/shared-core/mach64_irq.c
+++ b/shared-core/mach64_irq.c
@@ -46,6 +46,7 @@ irqreturn_t mach64_driver_irq_handler(DR
 	drm_mach64_private_t *dev_priv =
 	    (drm_mach64_private_t *) dev->dev_private;
 	int status;
+	int handled = 0;
 
 	status = MACH64_READ(MACH64_CRTC_INT_CNTL);
 
@@ -65,9 +66,20 @@ irqreturn_t mach64_driver_irq_handler(DR
 		atomic_inc(&dev->vbl_received);
 		DRM_WAKEUP(&dev->vbl_queue);
 		drm_vbl_send_signals(dev);
-		return IRQ_HANDLED;
+		handled = 1;
 	}
-	return IRQ_NONE;
+
+	/* BM_EOL (bus master end of transfer) interrupt */
+	if (status & MACH64_CRTC_BUSMASTER_EOL_INT) {
+		MACH64_WRITE(MACH64_CRTC_INT_CNTL,
+			     (status & ~MACH64_CRTC_INT_ACKS)
+			     | MACH64_CRTC_BUSMASTER_EOL_INT);
+
+		complete(&dev_priv->bm_eol_completion);
+		handled = 1;
+	}
+
+	return IRQ_RETVAL(handled);
 }
 
 int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
@@ -102,6 +114,10 @@ void mach64_driver_irq_preinstall(drm_de
 	/* Disable and clear VBLANK interrupt */
 	MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
 		     | MACH64_CRTC_VBLANK_INT);
+
+	/* Disable and clear BM_EOL interrupt */
+	MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_BUSMASTER_EOL_INT_EN)
+		     | MACH64_CRTC_BUSMASTER_EOL_INT);
 }
 
 void mach64_driver_irq_postinstall(drm_device_t * dev)
@@ -113,6 +129,10 @@ void mach64_driver_irq_postinstall(drm_d
 	MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
 		     | MACH64_CRTC_VBLANK_INT_EN);
 
+	/* Turn on BM_EOL interrupt */
+	MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
+		     | MACH64_CRTC_BUSMASTER_EOL_INT_EN);
+
 	DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n",
 		  MACH64_READ(MACH64_CRTC_INT_CNTL));
 
@@ -131,6 +151,12 @@ void mach64_driver_irq_uninstall(drm_dev
 		     ~MACH64_CRTC_VBLANK_INT_EN)
 		     | MACH64_CRTC_VBLANK_INT);
 
+	/* Disable and clear BM_EOL interrupt */
+	MACH64_WRITE(MACH64_CRTC_INT_CNTL,
+		     (MACH64_READ(MACH64_CRTC_INT_CNTL) &
+		      ~MACH64_CRTC_BUSMASTER_EOL_INT_EN)
+		     | MACH64_CRTC_BUSMASTER_EOL_INT);
+
 	DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
 		  MACH64_READ(MACH64_CRTC_INT_CNTL));
 }
diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c
index 38cefca..e810948 100644
--- a/shared-core/mach64_state.c
+++ b/shared-core/mach64_state.c
@@ -50,6 +50,7 @@ drm_ioctl_desc_t mach64_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_MACH64_BLIT)] = {mach64_dma_blit, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_MACH64_FLUSH)] = {mach64_dma_flush, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_MACH64_GETPARAM)] = {mach64_get_param, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_MACH64_SYSBLIT)] = {mach64_dma_sysblit, DRM_AUTH},
 };
 
 int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
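For context, a userspace caller (the Xserver DDX in this design) would drive the new ioctl roughly as follows. This is a sketch, not part of the patch: it assumes libdrm's drmCommandWrite(), and the helper name is hypothetical. The caller must hold the DRM lock, per the LOCK_TEST_WITH_RETURN check in mach64_dma_sysblit().

	#include <xf86drm.h>
	#include "mach64_drm.h"

	/* Hypothetical helper: copy h lines of w_bytes each from src into the
	 * framebuffer at byte offset fb_addr. Returns 0 or a negative errno. */
	static int mach64_sysblit_to_fb(int fd, char *src, unsigned int mem_pitch,
					unsigned int fb_addr, unsigned int fb_pitch,
					unsigned int w_bytes, unsigned int h)
	{
		drm_mach64_sysblit_t xfer;

		xfer.h = h;
		xfer.w_bytes = w_bytes;
		xfer.fb_addr = fb_addr;		/* bits [1:0] must match src */
		xfer.fb_pitch = fb_pitch;
		xfer.mem_addr = src;
		xfer.mem_pitch = mem_pitch;
		xfer.to_fb = 1;			/* system memory -> framebuffer */

		return drmCommandWrite(fd, DRM_MACH64_SYSBLIT,
				       &xfer, sizeof(xfer));
	}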