Commit 22f579c6 authored by Dave Airlie, committed by Dave Airlie

drm: Add via unichrome support

Add DRM device driver for VIA Unichrome chipsets

From: Unichrome Project http://unichrome.sf.net, Erdi Chen, Thomas Hellstrom
Signed-off-by: Dave Airlie <airlied@linux.ie>
Parent 99f95e52
@@ -96,3 +96,10 @@ config DRM_SIS
chipset. If M is selected the module will be called sis. AGP
support is required for this driver to work.
config DRM_VIA
tristate "Via unichrome video cards"
depends on DRM
help
Choose this option if you have a Via unichrome or compatible video
chipset. If M is selected the module will be called via.
@@ -18,6 +18,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
ffb-objs := ffb_drv.o ffb_context.o
sis-objs := sis_drv.o sis_ds.o sis_mm.o
via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
@@ -35,4 +36,5 @@ obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_VIA) += via.o
@@ -223,3 +223,10 @@
{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
#define viadrv_PCI_IDS \
{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
(This diff has been collapsed.)
/* via_dma.c -- DMA support for the VIA Unichrome/Pro
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
* All Rights Reserved.
*
* Copyright 2004 The Unichrome project.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Tungsten Graphics,
* Erdi Chen,
* Thomas Hellstrom.
*/
#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_3d_reg.h"
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
/* defines for VIA 3D registers */
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_TRANSPACE 0x440
/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#define SetReg2DAGP(nReg, nData) { \
*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
*((uint32_t *)(vb) + 1) = (nData); \
vb = ((uint32_t *)vb) + 2; \
dev_priv->dma_low +=8; \
}
#define via_flush_write_combine() DRM_MEMORYBARRIER()
#define VIA_OUT_RING_QW(w1,w2) \
*vb++ = (w1); \
*vb++ = (w2); \
dev_priv->dma_low += 8;
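/*
 * Worked example of the macros above: SetReg2DAGP(0x0C, 0) emits two
 * 32-bit words at the current ring position -- (0x0C >> 2) | HALCYON_HEADER1
 * followed by the data word 0 -- and then advances both vb and
 * dev_priv->dma_low by one quadword (8 bytes).  VIA_OUT_RING_QW emits a
 * raw quadword in the same way.
 */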
static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
/*
* Free space in command buffer.
*/
static uint32_t
via_cmdbuf_space(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset +
(uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
(hw_addr - dev_priv->dma_low));
}
/*
* How much does the command regulator lag behind?
*/
static uint32_t
via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset +
(uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_low - hw_addr) :
(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}
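/*
 * Worked example of the wrap-around arithmetic above, assuming a 1 MiB
 * ring (dma_high = 0x100000): with dma_low = 0xF0000 and a hardware read
 * pointer hw_addr = 0x10000, the regulator trails the write pointer, so
 * the free space is dma_high + hw_addr - dma_low = 0x20000 bytes (the
 * tail of the ring plus the head up to hw_addr), while the lag is
 * dma_low - hw_addr = 0xE0000 bytes.
 */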
/*
* Check that the given size fits in the buffer, otherwise wait.
*/
static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t cur_addr, hw_addr, next_addr;
volatile uint32_t *hw_addr_ptr;
uint32_t count;
hw_addr_ptr = dev_priv->hw_addr_ptr;
cur_addr = dev_priv->dma_low;
next_addr = cur_addr + size + 512*1024;
count = 1000000;
do {
hw_addr = *hw_addr_ptr - agp_base;
if (count-- == 0) {
DRM_ERROR("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
hw_addr, cur_addr, next_addr);
return -1;
}
} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
return 0;
}
/*
* Checks whether the buffer head has reached the end and rewinds the
* ring buffer when necessary.
*
* Returns virtual pointer to ring buffer.
*/
static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
unsigned int size)
{
if ((dev_priv->dma_low + size + 4*CMDBUF_ALIGNMENT_SIZE) > dev_priv->dma_high) {
via_cmdbuf_rewind(dev_priv);
}
if (via_cmdbuf_wait(dev_priv, size) != 0) {
return NULL;
}
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
int via_dma_cleanup(drm_device_t * dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv =
(drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start) {
via_cmdbuf_reset(dev_priv);
drm_core_ioremapfree(&dev_priv->ring.map, dev);
dev_priv->ring.virtual_start = NULL;
}
}
return 0;
}
static int via_initialize(drm_device_t * dev,
drm_via_private_t * dev_priv,
drm_via_dma_init_t * init)
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
return DRM_ERR(EFAULT);
}
if (dev_priv->ring.virtual_start != NULL) {
DRM_ERROR("%s called again without calling cleanup\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
if (!dev->agp || !dev->agp->base) {
DRM_ERROR("%s called with no agp memory available\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
dev_priv->ring.map.offset = dev->agp->base + init->offset;
dev_priv->ring.map.size = init->size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
via_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return DRM_ERR(ENOMEM);
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->dma_ptr = dev_priv->ring.virtual_start;
dev_priv->dma_low = 0;
dev_priv->dma_high = init->size;
dev_priv->dma_wrap = init->size;
dev_priv->dma_offset = init->offset;
dev_priv->last_pause_ptr = NULL;
dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;
via_cmdbuf_start(dev_priv);
return 0;
}
int via_dma_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_dma_init_t init;
int retcode = 0;
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
sizeof(init));
switch (init.func) {
case VIA_INIT_DMA:
if (!capable(CAP_SYS_ADMIN))
retcode = DRM_ERR(EPERM);
else
retcode = via_initialize(dev, dev_priv, &init);
break;
case VIA_CLEANUP_DMA:
if (!capable(CAP_SYS_ADMIN))
retcode = DRM_ERR(EPERM);
else
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
0: DRM_ERR( EFAULT );
break;
default:
retcode = DRM_ERR(EINVAL);
break;
}
return retcode;
}
static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv;
uint32_t *vb;
int ret;
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
/*
* Running this function on AGP memory is dead slow. Therefore
* we run it on a temporary cacheable system memory buffer and
* copy it to AGP memory when ready.
*/
if ((ret = via_verify_command_stream((uint32_t *)dev_priv->pci_buf, cmd->size, dev, 1))) {
return ret;
}
vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
if (vb == NULL) {
return DRM_ERR(EAGAIN);
}
memcpy(vb, dev_priv->pci_buf, cmd->size);
dev_priv->dma_low += cmd->size;
/*
* Small submissions somehow stall the CPU. (AGP cache effects?)
* Pad them to a greater size.
*/
if (cmd->size < 0x100)
via_pad_cache(dev_priv,(0x100 - cmd->size) >> 3);
via_cmdbuf_pause(dev_priv);
return 0;
}
int via_driver_dma_quiescent(drm_device_t * dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
if (!via_wait_idle(dev_priv)) {
return DRM_ERR(EBUSY);
}
return 0;
}
int via_flush_ioctl(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
LOCK_TEST_WITH_RETURN( dev, filp );
return via_driver_dma_quiescent(dev);
}
int via_cmdbuffer(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_cmdbuffer_t cmdbuf;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
sizeof(cmdbuf));
DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
if (ret) {
return ret;
}
return 0;
}
extern int
via_parse_command_stream(drm_device_t *dev, const uint32_t * buf, unsigned int size);
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret;
if (cmd->size > VIA_PCI_BUF_SIZE) {
return DRM_ERR(ENOMEM);
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return DRM_ERR(EFAULT);
if ((ret = via_verify_command_stream((uint32_t *)dev_priv->pci_buf, cmd->size, dev, 0))) {
return ret;
}
ret = via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf, cmd->size);
return ret;
}
int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_cmdbuffer_t cmdbuf;
int ret;
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
sizeof(cmdbuf));
DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
cmdbuf.size);
ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
if (ret) {
return ret;
}
return 0;
}
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
uint32_t * vb, int qw_count)
{
for (; qw_count > 0; --qw_count) {
VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
}
return vb;
}
/*
* This function is used internally by the ring buffer management code.
*
* Returns virtual pointer to ring buffer.
*/
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
/*
* Hooks a segment of data into the tail of the ring-buffer by
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
static int via_hook_segment(drm_via_private_t *dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
int paused, count;
volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
via_flush_write_combine();
while(! *(via_get_dma(dev_priv)-1));
*dev_priv->last_pause_ptr = pause_addr_lo;
via_flush_write_combine();
/*
* The below statement is inserted to really force the flush.
* Not sure it is needed.
*/
while(! *dev_priv->last_pause_ptr);
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
while(! *dev_priv->last_pause_ptr);
paused = 0;
count = 20;
while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
if ((count <= 8) && (count >= 0)) {
uint32_t rgtr, ptr;
rgtr = *(dev_priv->hw_addr_ptr);
ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 -
CMDBUF_ALIGNMENT_SIZE;
if (rgtr <= ptr) {
DRM_ERROR("Command regulator\npaused at count %d, address %x, "
"while current pause address is %x.\n"
"Please mail this message to "
"<unichrome-devel@lists.sourceforge.net>\n",
count, rgtr, ptr);
}
}
if (paused && !no_pci_fire) {
uint32_t rgtr,ptr;
uint32_t ptr_low;
count = 1000000;
while ((VIA_READ(VIA_REG_STATUS) & VIA_CMD_RGTR_BUSY) && count--);
rgtr = *(dev_priv->hw_addr_ptr);
ptr = ((char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
ptr_low = (ptr > 3*CMDBUF_ALIGNMENT_SIZE) ?
ptr - 3*CMDBUF_ALIGNMENT_SIZE : 0;
if (rgtr <= ptr && rgtr >= ptr_low) {
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
}
}
return paused;
}
static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;
while (count-- && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY))) ;
return count;
}
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t *cmd_addr_lo,
int skip_wait)
{
uint32_t agp_base;
uint32_t cmd_addr, addr_lo, addr_hi;
uint32_t *vb;
uint32_t qw_pad_count;
if (!skip_wait)
via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);
vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
cmd_addr = (addr) ? addr :
agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
(cmd_addr & HC_HAGPBpL_MASK));
addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi,
*cmd_addr_lo = addr_lo);
return vb;
}
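/*
 * Alignment note (derived from the code above): once the HC_HEADER2
 * quadword has been emitted, the qw_pad_count quadwords that follow
 * (the dummy padding plus the final address command) bring dma_low up to
 * the next CMDBUF_ALIGNMENT_SIZE (256-byte) boundary, so the PAUSE/JUMP
 * address command always occupies the last quadword of an aligned
 * 256-byte block.  For addr == 0 the computed cmd_addr points at that
 * same final quadword in AGP space.
 */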
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t start_addr, start_addr_lo;
uint32_t end_addr, end_addr_lo;
uint32_t command;
uint32_t agp_base;
dev_priv->dma_low = 0;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
start_addr = agp_base;
end_addr = agp_base + dev_priv->dma_high;
start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
((end_addr & 0xff000000) >> 16));
dev_priv->last_pause_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
&pause_addr_hi, & pause_addr_lo, 1) - 1;
via_flush_write_combine();
while(! *dev_priv->last_pause_ptr);
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, command);
VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
}
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
uint32_t *vb;
via_cmdbuf_wait(dev_priv, qwords + 2);
vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW( HC_HEADER2, HC_ParaType_NotTex << 16);
via_align_buffer(dev_priv,vb,qwords);
}
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
uint32_t *vb = via_get_dma(dev_priv);
SetReg2DAGP(0x0C, (0 | (0 << 16)));
SetReg2DAGP(0x10, 0 | (0 << 16));
SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
uint32_t agp_base;
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t jump_addr_lo, jump_addr_hi;
volatile uint32_t *last_pause_ptr;
uint32_t dma_low_save1, dma_low_save2;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
&jump_addr_lo, 0);
dev_priv->dma_wrap = dev_priv->dma_low;
/*
* Wrap command buffer to the beginning.
*/
dev_priv->dma_low = 0;
if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
DRM_ERROR("via_cmdbuf_jump failed\n");
}
via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv);
last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) -1;
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
dma_low_save1 = dev_priv->dma_low;
/*
* Now, set a trap that will pause the regulator if it tries to rerun the old
* command buffer. (Which may happen if via_hook_segment detects a command regulator pause
* and reissues the jump command over PCI, while the regulator has already taken the jump
* and actually paused at the current buffer end).
* There appears to be no other way to detect this condition, since the hw_addr_pointer
* does not seem to get updated immediately when a jump occurs.
*/
last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) -1;
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
dma_low_save2 = dev_priv->dma_low;
dev_priv->dma_low = dma_low_save1;
via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0);
dev_priv->dma_low = dma_low_save2;
via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
via_cmdbuf_jump(dev_priv);
}
static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
uint32_t pause_addr_lo, pause_addr_hi;
via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
via_wait_idle(dev_priv);
}
/*
* User interface to the space and lag functions.
*/
int
via_cmdbuf_size(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_cmdbuf_size_t d_siz;
int ret = 0;
uint32_t tmp_size, count;
drm_via_private_t *dev_priv;
DRM_DEBUG("via cmdbuf_size\n");
LOCK_TEST_WITH_RETURN( dev, filp );
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t *) data,
sizeof(d_siz));
count = 1000000;
tmp_size = d_siz.size;
switch(d_siz.func) {
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) && count--) {
if (!d_siz.wait) {
break;
}
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
ret = DRM_ERR(EAGAIN);
}
break;
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) && count--) {
if (!d_siz.wait) {
break;
}
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
ret = DRM_ERR(EAGAIN);
}
break;
default:
ret = DRM_ERR(EFAULT);
}
d_siz.size = tmp_size;
DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t *) data, d_siz,
sizeof(d_siz));
return ret;
}
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_
/* WARNING: These defines must be the same as what the Xserver uses.
* If you change them, you must change the defines in the Xserver.
*/
#ifndef _VIA_DEFINES_
#define _VIA_DEFINES_
#ifndef __KERNEL__
#include "via_drmclient.h"
#endif
#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define XVMCLOCKPTR(saPriv,lockNo) \
((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
(VIA_MAX_CACHELINE_SIZE - 1)) & \
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
VIA_MAX_CACHELINE_SIZE*(lockNo)))
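/*
 * XVMCLOCKPTR() rounds the start of XvMCLockArea up to the next
 * VIA_MAX_CACHELINE_SIZE (64-byte) boundary and then indexes lock number
 * lockNo at 64-byte strides, so each lock sits alone in its own cache
 * line.  The "+ 1" in the XvMCLockArea dimension further down leaves room
 * for that initial round-up.
 */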
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define VIA_NR_TEX_REGIONS 64
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
#endif
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_BUFFERS 0x8
#define VIA_UPLOAD_TEX0 0x10
#define VIA_UPLOAD_TEX1 0x20
#define VIA_UPLOAD_CLIPRECTS 0x40
#define VIA_UPLOAD_ALL 0xff
/* VIA specific ioctls */
#define DRM_VIA_ALLOCMEM 0x00
#define DRM_VIA_FREEMEM 0x01
#define DRM_VIA_AGP_INIT 0x02
#define DRM_VIA_FB_INIT 0x03
#define DRM_VIA_MAP_INIT 0x04
#define DRM_VIA_DEC_FUTEX 0x05
#define NOT_USED
#define DRM_VIA_DMA_INIT 0x07
#define DRM_VIA_CMDBUFFER 0x08
#define DRM_VIA_FLUSH 0x09
#define DRM_VIA_PCICMD 0x0a
#define DRM_VIA_CMDBUF_SIZE 0x0b
#define NOT_USED
#define DRM_VIA_WAIT_IRQ 0x0d
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
drm_via_cmdbuf_size_t)
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
#define VIA_TEX_SETUP_SIZE 8
/* Flags for clear ioctl
*/
#define VIA_FRONT 0x1
#define VIA_BACK 0x2
#define VIA_DEPTH 0x4
#define VIA_STENCIL 0x8
#define VIDEO 0
#define AGP 1
typedef struct {
uint32_t offset;
uint32_t size;
} drm_via_agp_t;
typedef struct {
uint32_t offset;
uint32_t size;
} drm_via_fb_t;
typedef struct {
uint32_t context;
uint32_t type;
uint32_t size;
unsigned long index;
unsigned long offset;
} drm_via_mem_t;
typedef struct _drm_via_init {
enum {
VIA_INIT_MAP = 0x01,
VIA_CLEANUP_MAP = 0x02
} func;
unsigned long sarea_priv_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long agpAddr;
} drm_via_init_t;
typedef struct _drm_via_futex {
enum {
VIA_FUTEX_WAIT = 0x00,
VIA_FUTEX_WAKE = 0x01
} func;
uint32_t ms;
uint32_t lock;
uint32_t val;
} drm_via_futex_t;
typedef struct _drm_via_dma_init {
enum {
VIA_INIT_DMA = 0x01,
VIA_CLEANUP_DMA = 0x02,
VIA_DMA_INITIALIZED = 0x03
} func;
unsigned long offset;
unsigned long size;
unsigned long reg_pause_addr;
} drm_via_dma_init_t;
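/*
 * A minimal userspace sketch of bringing up the AGP command ring with this
 * structure (illustrative only; fd, ring_offset_in_agp, ring_size and
 * pause_reg_offset are placeholder values, not part of this header):
 *
 *	drm_via_dma_init_t init = {
 *		.func           = VIA_INIT_DMA,
 *		.offset         = ring_offset_in_agp,
 *		.size           = ring_size,
 *		.reg_pause_addr = pause_reg_offset,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_VIA_DMA_INIT, &init) < 0)
 *		perror("DRM_IOCTL_VIA_DMA_INIT");
 */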
typedef struct _drm_via_cmdbuffer {
char *buf;
unsigned long size;
} drm_via_cmdbuffer_t;
/* Warning: If you change the SAREA structure you must change the Xserver
* structure as well */
typedef struct _drm_via_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char inUse; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_via_tex_region_t;
typedef struct _drm_via_sarea {
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
int vertexPrim;
/*
* Below is for XvMC.
* We want the lock integers alone on, and aligned to, a cache line.
* Therefore this somewhat strange construct.
*/
char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
} drm_via_sarea_t;
typedef struct _drm_via_cmdbuf_size {
enum {
VIA_CMDBUF_SPACE = 0x01,
VIA_CMDBUF_LAG = 0x02
} func;
int wait;
uint32_t size;
} drm_via_cmdbuf_size_t;
typedef enum {
VIA_IRQ_ABSOLUTE = 0x0,
VIA_IRQ_RELATIVE = 0x1,
VIA_IRQ_SIGNAL = 0x10000000,
VIA_IRQ_FORCE_SEQUENCE = 0x20000000
} via_irq_seq_type_t;
#define VIA_IRQ_FLAGS_MASK 0xF0000000
struct drm_via_wait_irq_request{
unsigned irq;
via_irq_seq_type_t type;
uint32_t sequence;
uint32_t signal;
};
typedef union drm_via_irqwait {
struct drm_via_wait_irq_request request;
struct drm_wait_vblank_reply reply;
} drm_via_irqwait_t;
#ifdef __KERNEL__
int via_fb_init(DRM_IOCTL_ARGS);
int via_mem_alloc(DRM_IOCTL_ARGS);
int via_mem_free(DRM_IOCTL_ARGS);
int via_agp_init(DRM_IOCTL_ARGS);
int via_map_init(DRM_IOCTL_ARGS);
int via_decoder_futex(DRM_IOCTL_ARGS);
int via_dma_init(DRM_IOCTL_ARGS);
int via_cmdbuffer(DRM_IOCTL_ARGS);
int via_flush_ioctl(DRM_IOCTL_ARGS);
int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
int via_cmdbuf_size(DRM_IOCTL_ARGS);
int via_wait_irq(DRM_IOCTL_ARGS);
#endif
#endif /* _VIA_DRM_H_ */
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "drm_pciids.h"
static int postinit(struct drm_device *dev, unsigned long flags)
{
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
);
return 0;
}
static int version(drm_version_t * version)
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY(version->name, DRIVER_NAME);
DRM_COPY(version->date, DRIVER_DATE);
DRM_COPY(version->desc, DRIVER_DESC);
return 0;
}
static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
static drm_ioctl_desc_t ioctls[] = {
[DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
[DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
};
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
.context_ctor = via_init_context,
.context_dtor = via_final_context,
.vblank_wait = via_driver_vblank_wait,
.irq_preinstall = via_driver_irq_preinstall,
.irq_postinstall = via_driver_irq_postinstall,
.irq_uninstall = via_driver_irq_uninstall,
.irq_handler = via_driver_irq_handler,
.dma_quiescent = via_driver_dma_quiescent,
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.postinit = postinit,
.version = version,
.ioctls = ioctls,
.num_ioctls = DRM_ARRAY_SIZE(ioctls),
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
}
};
static int __init via_init(void)
{
via_init_command_verifier();
return drm_init(&driver);
}
static void __exit via_exit(void)
{
drm_exit(&driver);
}
module_init(via_init);
module_exit(via_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
#define DRIVER_AUTHOR "VIA"
#define DRIVER_NAME "via"
#define DRIVER_DESC "VIA Unichrome / Pro"
#define DRIVER_DATE "20050523"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 3
#include "via_verifier.h"
#define VIA_PCI_BUF_SIZE 60000
#define VIA_FIRE_BUF_SIZE 1024
#define VIA_NUM_IRQS 2
typedef struct drm_via_ring_buffer {
drm_map_t map;
char *virtual_start;
} drm_via_ring_buffer_t;
typedef uint32_t maskarray_t[5];
typedef struct drm_via_irq {
atomic_t irq_received;
uint32_t pending_mask;
uint32_t enable_mask;
wait_queue_head_t irq_queue;
} drm_via_irq_t;
typedef struct drm_via_private {
drm_via_sarea_t *sarea_priv;
drm_map_t *sarea;
drm_map_t *fb;
drm_map_t *mmio;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
char *dma_ptr;
unsigned int dma_low;
unsigned int dma_high;
unsigned int dma_offset;
uint32_t dma_wrap;
volatile uint32_t *last_pause_ptr;
volatile uint32_t *hw_addr_ptr;
drm_via_ring_buffer_t ring;
struct timeval last_vblank;
int last_vblank_valid;
unsigned usec_per_vblank;
drm_via_state_t hc_state;
char pci_buf[VIA_PCI_BUF_SIZE];
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
uint32_t num_fire_offsets;
int pro_group_a;
drm_via_irq_t via_irqs[VIA_NUM_IRQS];
unsigned num_irqs;
maskarray_t *irq_masks;
uint32_t irq_enable_mask;
uint32_t irq_pending_mask;
} drm_via_private_t;
/* VIA MMIO register access */
#define VIA_BASE ((dev_priv->mmio))
#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
extern int via_init_context(drm_device_t * dev, int context);
extern int via_final_context(drm_device_t * dev, int context);
extern int via_do_cleanup_map(drm_device_t * dev);
extern int via_map_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(drm_device_t * dev);
extern void via_driver_irq_postinstall(drm_device_t * dev);
extern void via_driver_irq_uninstall(drm_device_t * dev);
extern int via_dma_cleanup(drm_device_t * dev);
extern void via_init_command_verifier(void);
extern int via_driver_dma_quiescent(drm_device_t * dev);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
#endif
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <asm/io.h>
#include "via_ds.h"
extern unsigned int VIA_DEBUG;
set_t *via_setInit(void)
{
int i;
set_t *set;
set = (set_t *) drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
for (i = 0; i < SET_SIZE; i++) {
set->list[i].free_next = i + 1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE - 1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
return set;
}
int via_setAdd(set_t * set, ITEM_TYPE item)
{
int free = set->free;
if (free != -1) {
set->list[free].val = item;
set->free = set->list[free].free_next;
} else {
return 0;
}
set->list[free].alloc_next = set->alloc;
set->alloc = free;
set->list[free].free_next = -1;
return 1;
}
int via_setDel(set_t * set, ITEM_TYPE item)
{
int alloc = set->alloc;
int prev = -1;
while (alloc != -1) {
if (set->list[alloc].val == item) {
if (prev != -1)
set->list[prev].alloc_next =
set->list[alloc].alloc_next;
else
set->alloc = set->list[alloc].alloc_next;
break;
}
prev = alloc;
alloc = set->list[alloc].alloc_next;
}
if (alloc == -1)
return 0;
set->list[alloc].free_next = set->free;
set->free = alloc;
set->list[alloc].alloc_next = -1;
return 1;
}
/* Note: the call sequence setFirst -> setAdd -> setNext is not supported. */
int via_setFirst(set_t * set, ITEM_TYPE * item)
{
if (set->alloc == -1)
return 0;
*item = set->list[set->alloc].val;
set->trace = set->list[set->alloc].alloc_next;
return 1;
}
int via_setNext(set_t * set, ITEM_TYPE * item)
{
if (set->trace == -1)
return 0;
*item = set->list[set->trace].val;
set->trace = set->list[set->trace].alloc_next;
return 1;
}
int via_setDestroy(set_t * set)
{
drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
return 1;
}
#define ISFREE(bptr) ((bptr)->free)
#define fprintf(fmt, arg...) do{}while(0)
memHeap_t *via_mmInit(int ofs, int size)
{
PMemBlock blocks;
if (size <= 0)
return 0;
blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
if (blocks) {
blocks->ofs = ofs;
blocks->size = size;
blocks->free = 1;
return (memHeap_t *) blocks;
} else
return 0;
}
static TMemBlock *SliceBlock(TMemBlock * p,
int startofs, int size,
int reserved, int alignment)
{
TMemBlock *newblock;
/* break left */
if (startofs > p->ofs) {
newblock =
(TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->next = p->next;
p->size -= newblock->size;
p->next = newblock;
p = newblock;
}
/* break right */
if (size < p->size) {
newblock =
(TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->next = p->next;
p->size = size;
p->next = newblock;
}
/* p = middle block */
p->align = alignment;
p->free = 0;
p->reserved = reserved;
return p;
}
PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
int startSearch)
{
int mask, startofs, endofs;
TMemBlock *p;
if (!heap || align2 < 0 || size <= 0)
return NULL;
mask = (1 << align2) - 1;
startofs = 0;
p = (TMemBlock *) heap;
while (p) {
if (ISFREE(p)) {
startofs = (p->ofs + mask) & ~mask;
if (startofs < startSearch)
startofs = startSearch;
endofs = startofs + size;
if (endofs <= (p->ofs + p->size))
break;
}
p = p->next;
}
if (!p)
return NULL;
p = SliceBlock(p, startofs, size, 0, mask + 1);
p->heap = heap;
return p;
}
static __inline__ int Join2Blocks(TMemBlock * p)
{
if (p->free && p->next && p->next->free) {
TMemBlock *q = p->next;
p->size += q->size;
p->next = q->next;
drm_free(q, sizeof(TMemBlock), DRM_MEM_DRIVER);
return 1;
}
return 0;
}
int via_mmFreeMem(PMemBlock b)
{
TMemBlock *p, *prev;
if (!b)
return 0;
if (!b->heap) {
fprintf(stderr, "no heap\n");
return -1;
}
p = b->heap;
prev = NULL;
while (p && p != b) {
prev = p;
p = p->next;
}
if (!p || p->free || p->reserved) {
if (!p)
fprintf(stderr, "block not found in heap\n");
else if (p->free)
fprintf(stderr, "block already free\n");
else
fprintf(stderr, "block is reserved\n");
return -1;
}
p->free = 1;
Join2Blocks(p);
if (prev)
Join2Blocks(prev);
return 0;
}
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _via_ds_h_
#define _via_ds_h_
#include "drmP.h"
/* Set Data Structure */
#define SET_SIZE 5000
typedef unsigned long ITEM_TYPE;
typedef struct {
ITEM_TYPE val;
int alloc_next, free_next;
} list_item_t;
typedef struct {
int alloc;
int free;
int trace;
list_item_t list[SET_SIZE];
} set_t;
set_t *via_setInit(void);
int via_setAdd(set_t * set, ITEM_TYPE item);
int via_setDel(set_t * set, ITEM_TYPE item);
int via_setFirst(set_t * set, ITEM_TYPE * item);
int via_setNext(set_t * set, ITEM_TYPE * item);
int via_setDestroy(set_t * set);
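/*
 * Typical iteration over an allocation set, mirroring the pattern used in
 * via_final_context() (sketch only):
 *
 *	ITEM_TYPE item;
 *	int more = via_setFirst(set, &item);
 *	while (more) {
 *		via_mmFreeMem((PMemBlock) item);
 *		more = via_setNext(set, &item);
 *	}
 *	via_setDestroy(set);
 */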
#endif
#ifndef MM_INC
#define MM_INC
struct mem_block_t {
struct mem_block_t *next;
struct mem_block_t *heap;
int ofs, size;
int align;
int free:1;
int reserved:1;
};
typedef struct mem_block_t TMemBlock;
typedef struct mem_block_t *PMemBlock;
/* a heap is just the first block in a chain */
typedef struct mem_block_t memHeap_t;
static __inline__ int mmBlockSize(PMemBlock b)
{
return b->size;
}
static __inline__ int mmOffset(PMemBlock b)
{
return b->ofs;
}
static __inline__ void mmMarkReserved(PMemBlock b)
{
b->reserved = 1;
}
/*
* input: start offset and total size in bytes
* return: a heap pointer if OK, NULL if error
*/
memHeap_t *via_mmInit(int ofs, int size);
PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
int startSearch);
/*
* Free block starts at offset
* input: pointer to a block
* return: 0 if OK, -1 if error
*/
int via_mmFreeMem(PMemBlock b);
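/*
 * Note on via_mmAllocMem(): align2 is the log2 of the requested alignment,
 * e.g. align2 == 5 asks for a 32-byte aligned block (the implementation
 * builds mask = (1 << align2) - 1 and rounds block offsets up with it).
 * startSearch, when non-zero, skips any free space below that offset.
 */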
#endif
/* via_irq.c
*
* Copyright 2004 BEAM Ltd.
* Copyright 2002 Tungsten Graphics, Inc.
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Terry Barnaby <terry1@beam.ltd.uk>
* Keith Whitwell <keith@tungstengraphics.com>
* Thomas Hellstrom <unichrome@shipmail.org>
*
* This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
* interrupt, as well as an infrastructure to handle other interrupts of the chip.
* The refresh rate is also calculated for video playback sync purposes.
*/
#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_drv.h"
#define VIA_REG_INTERRUPT 0x200
/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL (1 << 31)
#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
#define VIA_IRQ_VBLANK_PENDING (1 << 3)
#define VIA_IRQ_HQV0_ENABLE (1 << 11)
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
#define VIA_IRQ_HQV0_PENDING (1 << 9)
#define VIA_IRQ_HQV1_PENDING (1 << 10)
/*
* Device-specific IRQs go here. This type might need to be extended with
* the register if there are multiple IRQ control registers.
* Currently we activate the HQV interrupts of Unichrome Pro group A.
*/
static maskarray_t via_pro_group_a_irqs[] = {
{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 0x00000000 },
{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 0x00000000 }};
static int via_num_pro_group_a = sizeof(via_pro_group_a_irqs)/sizeof(maskarray_t);
static maskarray_t via_unichrome_irqs[] = {};
static int via_num_unichrome = sizeof(via_unichrome_irqs)/sizeof(maskarray_t);
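/*
 * Each maskarray_t entry above is laid out as
 * { enable_mask, pending_mask, status_reg, status_mask, status_value }:
 * via_driver_irq_preinstall() consumes indices 0 and 1, and
 * via_driver_irq_wait() polls register [2] for ([2] & [3]) == [4] when a
 * status register is given and force_sequence is not set.
 */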
static unsigned time_diff(struct timeval *now,struct timeval *then)
{
return (now->tv_usec >= then->tv_usec) ?
now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
int handled = 0;
struct timeval cur_vblank;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int i;
status = VIA_READ(VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev->vbl_received);
if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
do_gettimeofday(&cur_vblank);
if (dev_priv->last_vblank_valid) {
dev_priv->usec_per_vblank =
time_diff( &cur_vblank,&dev_priv->last_vblank) >> 4;
}
dev_priv->last_vblank = cur_vblank;
dev_priv->last_vblank_valid = 1;
}
if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n",
dev_priv->usec_per_vblank);
}
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
handled = 1;
}
for (i=0; i<dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) {
atomic_inc( &cur_irq->irq_received );
DRM_WAKEUP( &cur_irq->irq_queue );
handled = 1;
}
cur_irq++;
}
/* Acknowledge interrupts */
VIA_WRITE(VIA_REG_INTERRUPT, status);
if (handled)
return IRQ_HANDLED;
else
return IRQ_NONE;
}
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
{
u32 status;
if (dev_priv) {
/* Acknowledge interrupts */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status |
dev_priv->irq_pending_mask);
}
}
int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
DRM_DEBUG("viadrv_vblank_wait\n");
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
viadrv_acknowledge_irqs(dev_priv);
/* Assume that the user has missed the current sequence number
* by about a day rather than that she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received)) -
*sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
}
static int
via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_irq_sequence;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int ret = 0;
maskarray_t *masks = dev_priv->irq_masks;
DRM_DEBUG("%s\n", __FUNCTION__);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
if (irq >= dev_priv->num_irqs ) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, irq);
return DRM_ERR(EINVAL);
}
cur_irq += irq;
if (masks[irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4]));
cur_irq_sequence = atomic_read(&cur_irq->irq_received);
} else {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
(((cur_irq_sequence = atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
}
*sequence = cur_irq_sequence;
return ret;
}
/*
* drm_dma.h hooks
*/
void via_driver_irq_preinstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int i;
DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
if (dev_priv) {
dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
dev_priv->irq_masks = (dev_priv->pro_group_a) ?
via_pro_group_a_irqs : via_unichrome_irqs;
dev_priv->num_irqs = (dev_priv->pro_group_a) ?
via_num_pro_group_a : via_num_unichrome;
for(i=0; i < dev_priv->num_irqs; ++i) {
atomic_set(&cur_irq->irq_received, 0);
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
DRM_INIT_WAITQUEUE( &cur_irq->irq_queue );
dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++;
DRM_DEBUG("Initializing IRQ %d\n", i);
}
dev_priv->last_vblank_valid = 0;
/* Clear VSync interrupt regs */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(dev_priv->irq_enable_mask));
/* Clear bits if they're already high */
viadrv_acknowledge_irqs(dev_priv);
}
}
void via_driver_irq_postinstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("via_driver_irq_postinstall\n");
if (dev_priv) {
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
/* Some magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
}
}
void via_driver_irq_uninstall(drm_device_t * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("driver_irq_uninstall)\n");
if (dev_priv) {
/* Some more magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
}
}
int via_wait_irq(DRM_IOCTL_ARGS)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
drm_via_irqwait_t __user *argp = (void __user *)data;
drm_via_irqwait_t irqwait;
struct timeval now;
int ret = 0;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int force_sequence;
if (!dev->irq)
return DRM_ERR(EINVAL);
DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait));
if (irqwait.request.irq >= dev_priv->num_irqs) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irqwait.request.irq);
return DRM_ERR(EINVAL);
}
cur_irq += irqwait.request.irq;
switch (irqwait.request.type & ~VIA_IRQ_FLAGS_MASK) {
case VIA_IRQ_RELATIVE:
irqwait.request.sequence += atomic_read(&cur_irq->irq_received);
irqwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case VIA_IRQ_ABSOLUTE:
break;
default:
return DRM_ERR(EINVAL);
}
if (irqwait.request.type & VIA_IRQ_SIGNAL) {
DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
__FUNCTION__);
return DRM_ERR(EINVAL);
}
force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE);
ret = via_driver_irq_wait(dev, irqwait.request.irq, force_sequence,
&irqwait.request.sequence);
do_gettimeofday(&now);
irqwait.reply.tval_sec = now.tv_sec;
irqwait.reply.tval_usec = now.tv_usec;
DRM_COPY_TO_USER_IOCTL(argp, irqwait, sizeof(irqwait));
return ret;
}
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv;
DRM_DEBUG("%s\n", __FUNCTION__);
dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_via_private_t));
DRM_GETSAREA();
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
via_do_cleanup_map(dev);
return -EINVAL;
}
dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
if (!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
dev->dev_private = (void *)dev_priv;
via_do_cleanup_map(dev);
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
via_do_cleanup_map(dev);
return -EINVAL;
}
dev_priv->sarea_priv =
(drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
init->sarea_priv_offset);
dev_priv->agpAddr = init->agpAddr;
via_init_futex( dev_priv );
dev_priv->pro_group_a = (dev->pdev->device == 0x3118);
dev->dev_private = (void *)dev_priv;
return 0;
}
int via_do_cleanup_map(drm_device_t * dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv = dev->dev_private;
via_dma_cleanup(dev);
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
return 0;
}
int via_map_init(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_init_t init;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t *) data, sizeof(init));
switch (init.func) {
case VIA_INIT_MAP:
return via_do_init_map(dev, &init);
case VIA_CLEANUP_MAP:
return via_do_cleanup_map(dev);
}
return -EINVAL;
}
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_ds.h"
#include "via_mm.h"
#define MAX_CONTEXT 100
typedef struct {
int used;
int context;
set_t *sets[2]; /* 0 for frame buffer, 1 for AGP */
} via_context_t;
static via_context_t global_ppriv[MAX_CONTEXT];
static int via_agp_alloc(drm_via_mem_t * mem);
static int via_agp_free(drm_via_mem_t * mem);
static int via_fb_alloc(drm_via_mem_t * mem);
static int via_fb_free(drm_via_mem_t * mem);
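/*
 * add_alloc_set()/del_alloc_set() record and forget individual memory blocks
 * in the per-context allocation sets, so that everything a context allocated
 * can be reclaimed in via_final_context().
 */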
static int add_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = via_setAdd(global_ppriv[i].sets[type], val);
break;
}
}
return retval;
}
static int del_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = via_setDel(global_ppriv[i].sets[type], val);
break;
}
return retval;
}
/* agp memory management */
static memHeap_t *AgpHeap = NULL;
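/* Ioctl: create the AGP heap manager for the window supplied by userspace. */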
int via_agp_init(DRM_IOCTL_ARGS)
{
drm_via_agp_t agp;
DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t *) data, sizeof(agp));
AgpHeap = via_mmInit(agp.offset, agp.size);
DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)agp.offset, (unsigned long)agp.size);
return 0;
}
/* fb memory management */
static memHeap_t *FBHeap = NULL;
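/* Ioctl: create the framebuffer heap manager for the window supplied by userspace. */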
int via_fb_init(DRM_IOCTL_ARGS)
{
drm_via_fb_t fb;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t *) data, sizeof(fb));
FBHeap = via_mmInit(fb.offset, fb.size);
DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)fb.offset, (unsigned long)fb.size);
return 0;
}
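/*
 * Claim a free slot in global_ppriv[] for a new context and create its
 * framebuffer and AGP allocation sets.  Returns 1 on success, 0 on failure.
 */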
int via_init_context(struct drm_device *dev, int context)
{
int i;
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
if (i >= MAX_CONTEXT) {
for (i = 0; i < MAX_CONTEXT; i++) {
if (!global_ppriv[i].used) {
global_ppriv[i].context = context;
global_ppriv[i].used = 1;
global_ppriv[i].sets[0] = via_setInit();
global_ppriv[i].sets[1] = via_setInit();
DRM_DEBUG("init allocation set, socket=%d,"
" context = %d\n", i, context);
break;
}
}
if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
(global_ppriv[i].sets[1] == NULL)) {
return 0;
}
}
return 1;
}
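/*
 * Release everything a dying context still holds: free all video and AGP
 * blocks recorded in its allocation sets, give up its global_ppriv[] slot,
 * drop any XvMC locks it owned and, when the last context goes away,
 * uninstall the IRQ handler and tear down the maps.
 */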
int via_final_context(struct drm_device *dev, int context)
{
int i;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
for (i = 0; i < MAX_CONTEXT; i++)
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
if (i < MAX_CONTEXT) {
set_t *set;
ITEM_TYPE item;
int retval;
DRM_DEBUG("find socket %d, context = %d\n", i, context);
/* Video Memory */
set = global_ppriv[i].sets[0];
retval = via_setFirst(set, &item);
while (retval) {
DRM_DEBUG("free video memory 0x%lx\n", item);
via_mmFreeMem((PMemBlock) item);
retval = via_setNext(set, &item);
}
via_setDestroy(set);
/* AGP Memory */
set = global_ppriv[i].sets[1];
retval = via_setFirst(set, &item);
while (retval) {
DRM_DEBUG("free agp memory 0x%lx\n", item);
via_mmFreeMem((PMemBlock) item);
retval = via_setNext(set, &item);
}
via_setDestroy(set);
global_ppriv[i].used = 0;
}
via_release_futex(dev_priv, context);
#if defined(__linux__)
/* Linux specific until context tracking code gets ported to BSD */
/* Last context, perform cleanup */
if (dev->ctx_count == 1 && dev->dev_private) {
DRM_DEBUG("Last Context\n");
if (dev->irq)
drm_irq_uninstall(dev);
via_cleanup_futex(dev_priv);
via_do_cleanup_map(dev);
}
#endif
return 1;
}
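/*
 * Ioctl: allocate a block of video or AGP memory on behalf of userspace and
 * copy the resulting offset/index back to the caller.  A client would
 * typically (sketch, not part of this patch) fill a drm_via_mem_t with its
 * context, a type of VIDEO or AGP and the requested size, issue the alloc
 * ioctl, and later hand the returned index back through the free ioctl.
 */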
int via_mem_alloc(DRM_IOCTL_ARGS)
{
drm_via_mem_t mem;
DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *) data, sizeof(mem));
switch (mem.type) {
case VIDEO:
if (via_fb_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *) data, mem,
sizeof(mem));
return 0;
case AGP:
if (via_agp_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t *) data, mem,
sizeof(mem));
return 0;
}
return -EFAULT;
}
static int via_fb_alloc(drm_via_mem_t * mem)
{
drm_via_mm_t fb;
PMemBlock block;
int retval = 0;
if (!FBHeap)
return -1;
fb.size = mem->size;
fb.context = mem->context;
block = via_mmAllocMem(FBHeap, fb.size, 5, 0);
if (block) {
fb.offset = block->ofs;
fb.free = (unsigned long)block;
if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) fb.free);
retval = -1;
}
} else {
fb.offset = 0;
fb.size = 0;
fb.free = 0;
retval = -1;
}
mem->offset = fb.offset;
mem->index = fb.free;
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size,
(int)fb.offset);
return retval;
}
static int via_agp_alloc(drm_via_mem_t * mem)
{
drm_via_mm_t agp;
PMemBlock block;
int retval = 0;
if (!AgpHeap)
return -1;
agp.size = mem->size;
agp.context = mem->context;
block = via_mmAllocMem(AgpHeap, agp.size, 5, 0);
if (block) {
agp.offset = block->ofs;
agp.free = (unsigned long)block;
if (!add_alloc_set(agp.context, AGP, agp.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) agp.free);
retval = -1;
}
	} else {
		agp.offset = 0;
		agp.size = 0;
		agp.free = 0;
		retval = -1;
	}
mem->offset = agp.offset;
mem->index = agp.free;
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size,
(unsigned int)agp.offset);
return retval;
}
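/*
 * Ioctl: return a previously allocated video or AGP block to its heap and
 * remove it from the owning context's allocation set.
 */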
int via_mem_free(DRM_IOCTL_ARGS)
{
drm_via_mem_t mem;
DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t *) data, sizeof(mem));
switch (mem.type) {
case VIDEO:
if (via_fb_free(&mem) == 0)
return 0;
break;
case AGP:
if (via_agp_free(&mem) == 0)
return 0;
break;
}
return -EFAULT;
}
static int via_fb_free(drm_via_mem_t * mem)
{
drm_via_mm_t fb;
int retval = 0;
if (!FBHeap) {
return -1;
}
fb.free = mem->index;
fb.context = mem->context;
if (!fb.free) {
return -1;
}
via_mmFreeMem((PMemBlock) fb.free);
if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
retval = -1;
}
DRM_DEBUG("free fb, free = %ld\n", fb.free);
return retval;
}
static int via_agp_free(drm_via_mem_t * mem)
{
drm_via_mm_t agp;
int retval = 0;
agp.free = mem->index;
agp.context = mem->context;
if (!agp.free)
return -1;
via_mmFreeMem((PMemBlock) agp.free);
if (!del_alloc_set(agp.context, AGP, agp.free)) {
retval = -1;
}
DRM_DEBUG("free agp, free = %ld\n", agp.free);
return retval;
}
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _via_drm_mm_h_
#define _via_drm_mm_h_
typedef struct {
unsigned int context;
unsigned int size;
unsigned long offset;
unsigned long free;
} drm_via_mm_t;
typedef struct {
unsigned int size;
unsigned long handle;
void *virtual;
} drm_via_dma_t;
#endif
This diff has been collapsed.
/*
* Copyright 2004 The Unichrome Project. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Thomas Hellström 2004.
*/
#ifndef _VIA_VERIFIER_H_
#define _VIA_VERIFIER_H_
typedef enum {
	no_sequence = 0,
	z_address,
	dest_address,
	tex_address
} drm_via_sequence_t;
typedef struct {
unsigned texture;
uint32_t z_addr;
uint32_t d_addr;
uint32_t t_addr[2][10];
uint32_t pitch[2][10];
uint32_t height[2][10];
uint32_t tex_level_lo[2];
uint32_t tex_level_hi[2];
uint32_t tex_palette_size[2];
drm_via_sequence_t unfinished;
int agp_texture;
int multitex;
drm_device_t *dev;
drm_map_t *map_cache;
uint32_t vertex_count;
int agp;
const uint32_t *buf_start;
} drm_via_state_t;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
drm_device_t *dev, int agp);
#endif
/*
* Copyright 2005 Thomas Hellstrom. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Thomas Hellstrom 2005.
*
* Video and XvMC related functions.
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
void
via_init_futex(drm_via_private_t *dev_priv)
{
unsigned int i;
DRM_DEBUG("%s\n", __FUNCTION__);
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
}
}
void
via_cleanup_futex(drm_via_private_t *dev_priv)
{
}
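/*
 * Release any XvMC locks still held by a context that is being destroyed,
 * waking up sleepers if a released lock was contended.
 */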
void
via_release_futex(drm_via_private_t *dev_priv, int context)
{
unsigned int i;
volatile int *lock;
	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
		lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
		if (_DRM_LOCKING_CONTEXT(*lock) == context) {
			if (_DRM_LOCK_IS_HELD(*lock) && (*lock & _DRM_LOCK_CONT)) {
				DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
			}
			*lock = 0;
		}
	}
}
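/*
 * Ioctl backing the lightweight XvMC decoder futex: VIA_FUTEX_WAIT sleeps
 * (with a timeout derived from fx.ms) until the lock word differs from the
 * expected value, VIA_FUTEX_WAKE wakes any sleepers on that lock.
 */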
int
via_decoder_futex(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_via_futex_t fx;
volatile int *lock;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t *) data, sizeof(fx));
	if (fx.lock >= VIA_NR_XVMC_LOCKS)
		return -EFAULT;
lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock);
switch (fx.func) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock],
(fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val);
return ret;
case VIA_FUTEX_WAKE:
DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock]));
return 0;
}
return 0;
}