Commit eb6b6d7c authored by Dave Airlie

Merge remote branch 'korg/drm-radeon-testing' into drm-next-stage

* korg/drm-radeon-testing: (62 commits)
  drm/radeon/kms: update new pll algo
  drm/radeon/kms: add support for square microtiles on r3xx-r5xx
  drm/radeon/kms: force pinning buffer into visible VRAM
  drm/radeon/kms/evergreen: fix typo in cursor code
  drm/radeon/kms: implement reading active PCIE lanes on R600+
  drm/radeon/kms: for downclocking non-mobility check PERFORMANCE state
  drm/radeon/kms: simplify storing current and requested PM mode
  drm/radeon: fixes for r6xx/r7xx gfx init
  drm/radeon/rv740: fix backend setup
  drm/radeon/kms: fix R3XX/R4XX memory controller initialization
  [rfc] drm/radeon/kms: pm debugging check for vbl.
  drm/radeon: Fix memory allocation failures in the preKMS command stream checking.
  drm: Add generic multipart buffer.
  drm/radeon/kms: simplify memory controller setup V2
  drm/radeon: Add asic hook for dma copy to r200 cards.
  drm/radeon/kms: Create asic structure for r300 pcie cards.
  drm/radeon/kms: remove unused r600_gart_clear_page
  drm/radeon/kms: remove HDP flushes from fence emit (v2)
  drm/radeon/kms: add LVDS pll quirk for Dell Studio 15
  drm/radeon/kms: simplify picking power state
  ...

Conflicts:
	drivers/gpu/drm/radeon/atom.c
	drivers/gpu/drm/radeon/atombios.h
	drivers/gpu/drm/radeon/atombios_dp.c
	drivers/gpu/drm/radeon/r600.c
	drivers/gpu/drm/radeon/r600_audio.c
	drivers/gpu/drm/radeon/r600_cp.c
	drivers/gpu/drm/radeon/radeon.h
	drivers/gpu/drm/radeon/radeon_connectors.c
	drivers/gpu/drm/radeon/radeon_ring.c
	drivers/gpu/drm/radeon/rv770.c
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
 * Multipart buffer for copying data which is larger than the page size.
 *
 * Authors:
 * Pauli Nieminen <suokkos-at-gmail-dot-com>
 */
#include "drm_buffer.h"
/**
* Allocate the drm buffer object.
*
* buf: Pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
int drm_buffer_alloc(struct drm_buffer **buf, int size)
{
	int nr_pages = size / PAGE_SIZE + 1;
	int idx;

	/* Allocating pointer table to end of structure makes drm_buffer
	 * variable sized */
	*buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
			GFP_KERNEL);

	if (*buf == NULL) {
		DRM_ERROR("Failed to allocate drm buffer object to hold"
				" %d bytes in %d pages.\n",
				size, nr_pages);
		return -ENOMEM;
	}

	(*buf)->size = size;

	for (idx = 0; idx < nr_pages; ++idx) {

		(*buf)->data[idx] =
			kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
				GFP_KERNEL);

		if ((*buf)->data[idx] == NULL) {
			DRM_ERROR("Failed to allocate %dth page for drm"
					" buffer with %d bytes and %d pages.\n",
					idx + 1, size, nr_pages);
			goto error_out;
		}
	}

	return 0;

error_out:

	/* Only the last element can be a NULL pointer, so check it first. */
	if ((*buf)->data[idx])
		kfree((*buf)->data[idx]);

	for (--idx; idx >= 0; --idx)
		kfree((*buf)->data[idx]);

	kfree(*buf);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buffer_alloc);
/**
 * Copy the user data to the beginning of the buffer and reset the
 * processing iterator.
 *
 * user_data: A pointer to the data that is copied to the buffer.
 * size: The number of bytes to copy.
 */
int drm_buffer_copy_from_user(struct drm_buffer *buf,
		void __user *user_data, int size)
{
	int nr_pages = size / PAGE_SIZE + 1;
	int idx;

	if (size > buf->size) {
		DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
				" %d bytes space\n",
				size, buf->size);
		return -EFAULT;
	}

	for (idx = 0; idx < nr_pages; ++idx) {

		if (DRM_COPY_FROM_USER(buf->data[idx],
					user_data + idx * PAGE_SIZE,
					min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
			DRM_ERROR("Failed to copy user data (%p) to drm buffer"
					" (%p) %dth page.\n",
					user_data, buf, idx);
			return -EFAULT;
		}
	}
	buf->iterator = 0;
	return 0;
}
EXPORT_SYMBOL(drm_buffer_copy_from_user);
/**
 * Free the drm buffer object
 */
void drm_buffer_free(struct drm_buffer *buf)
{
	if (buf != NULL) {

		int nr_pages = buf->size / PAGE_SIZE + 1;
		int idx;
		for (idx = 0; idx < nr_pages; ++idx)
			kfree(buf->data[idx]);

		kfree(buf);
	}
}
EXPORT_SYMBOL(drm_buffer_free);
/**
 * Read an object from the buffer that may be split across multiple pages.
 * If the object is not split, the function simply returns a pointer to the
 * object inside the buffer. If the object is split, its data is copied into
 * the stack object supplied by the caller.
 *
 * The processing location of the buffer is also advanced to the next byte
 * after the object.
 *
 * objsize: The size of the object in bytes.
 * stack_obj: A pointer to a memory location where the object can be copied.
 */
void *drm_buffer_read_object(struct drm_buffer *buf,
		int objsize, void *stack_obj)
{
	int idx = drm_buffer_index(buf);
	int page = drm_buffer_page(buf);
	void *obj = NULL;

	if (idx + objsize <= PAGE_SIZE) {
		obj = &buf->data[page][idx];
	} else {
		/* The object is split, which forces a copy to the
		 * temporary stack object. */
		int beginsz = PAGE_SIZE - idx;
		memcpy(stack_obj, &buf->data[page][idx], beginsz);

		memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
				objsize - beginsz);

		obj = stack_obj;
	}

	drm_buffer_advance(buf, objsize);
	return obj;
}
EXPORT_SYMBOL(drm_buffer_read_object);
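For reference, a minimal sketch of how a caller is expected to drive this new API end to end. The wrapper function and the per-dword processing here are hypothetical; only the drm_buffer_* calls come from the code above (drm_buffer_unprocessed is a drm_buffer.h helper, visible in the r300 command-stream checker later in this merge):

/* Hypothetical usage sketch: pull a user-space command stream into a
 * multipart buffer and walk it one dword at a time. */
static int example_process_stream(void __user *user_data, int size)
{
	struct drm_buffer *buf;
	u32 stack_obj;
	u32 *cmd;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, user_data, size);
	if (ret)
		goto out;

	while (drm_buffer_unprocessed(buf) >= sizeof(u32)) {
		/* Returns a direct pointer when the object fits inside
		 * one page, otherwise copies it into stack_obj. */
		cmd = drm_buffer_read_object(buf, sizeof(u32), &stack_obj);
		/* ... validate and emit *cmd to the ring here ... */
	}

out:
	drm_buffer_free(buf);
	return ret;
}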
@@ -60,8 +60,7 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* define the number of Extension EDID block */
#define MAX_EDID_EXT_NUM 4
#define LEVEL_DMT 0
#define LEVEL_GTF 1
@@ -114,14 +113,14 @@ static const u8 edid_header[] = {
};
/**
* edid_is_valid - sanity check EDID data
* drm_edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity check the EDID block by looking at the header, the version number
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
* valid.
*/
static bool edid_is_valid(struct edid *edid)
bool drm_edid_is_valid(struct edid *edid)
{
int i, score = 0;
u8 csum = 0;
@@ -163,6 +162,7 @@ static bool edid_is_valid(struct edid *edid)
}
return 0;
}
EXPORT_SYMBOL(drm_edid_is_valid);
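With the validator exported, drivers can sanity-check EDID blocks they fetch themselves. A minimal sketch, assuming the caller has already read at least EDID_LENGTH bytes over DDC (the wrapper is hypothetical):

/* Hypothetical caller: reject a raw EDID block before parsing it.
 * drm_edid_is_valid() checks the header, version and checksum. */
static bool example_edid_usable(u8 *raw_block)
{
	return drm_edid_is_valid((struct edid *)raw_block);
}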
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
@@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
}
/* Choose real EDID extension number */
edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
MAX_EDID_EXT_NUM : edid->extensions;
edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
@@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, buf, len))
return -1;
if (edid_is_valid((struct edid *)buf))
if (drm_edid_is_valid((struct edid *)buf))
return 0;
}
@@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
int ret;
struct edid *edid;
edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL) {
dev_warn(&connector->dev->pdev->dev,
@@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (edid->extensions != 0) {
int edid_ext_num = edid->extensions;
if (edid_ext_num > MAX_EDID_EXT_NUM) {
if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
dev_warn(&connector->dev->pdev->dev,
"The number of extension(%d) is "
"over max (%d), actually read number (%d)\n",
edid_ext_num, MAX_EDID_EXT_NUM,
MAX_EDID_EXT_NUM);
edid_ext_num, DRM_MAX_EDID_EXT_NUM,
DRM_MAX_EDID_EXT_NUM);
/* Reset EDID extension number to be read */
edid_ext_num = MAX_EDID_EXT_NUM;
edid_ext_num = DRM_MAX_EDID_EXT_NUM;
}
/* Read EDID including extensions too */
ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
goto end;
/* Choose real EDID extension number */
edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
MAX_EDID_EXT_NUM : edid->extensions;
edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
@@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (edid == NULL) {
return 0;
}
if (!edid_is_valid(edid)) {
if (!drm_edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
@@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(call if_changed,mkregtable)
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
@@ -54,7 +59,8 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
This diff has been collapsed.
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
train_set[lane] = v | p;
}
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
retry:
memcpy(base, req_bytes, num_bytes);
args.lpAuxRequest = 0;
args.lpDataOut = 16;
args.ucDataOutLen = 0;
args.ucChannelID = chan->rec.i2c_id;
args.ucDelay = delay / 10;
args.v1.lpAuxRequest = 0;
args.v1.lpDataOut = 16;
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd_id;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (args.ucReplyStatus && !args.ucDataOutLen) {
if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
chan->rec.i2c_id, args.ucReplyStatus, retry_count);
chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
}
if (args.ucDataOutLen && read_byte && read_buf_len) {
if (read_buf_len < args.ucDataOutLen) {
if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
if (read_buf_len < args.v1.ucDataOutLen) {
DRM_ERROR("Buffer to small for return answer %d %d\n",
read_buf_len, args.ucDataOutLen);
read_buf_len, args.v1.ucDataOutLen);
return false;
}
{
int len = min(read_buf_len, args.ucDataOutLen);
int len = min(read_buf_len, args.v1.ucDataOutLen);
memcpy(read_byte, base + 16, len);
}
}
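The union introduced above is the usual atombios idiom for firmware tables whose parameter block layout changes between ASIC revisions: common fields are written through args.v1, revision-specific fields through args.v2, and both views share one allocation. A reduced illustration of the pattern; these struct layouts are invented for the example and do not match the real PROCESS_AUX_CHANNEL_TRANSACTION structures:

/* Illustration only: two versions of a parameter block overlaid in a
 * union, so one code path can fill whichever layout the ASIC needs. */
struct example_args_v1 { unsigned char channel_id, delay; };
struct example_args_v2 { unsigned char channel_id, delay, hpd_id; };

union example_args {
	struct example_args_v1 v1;
	struct example_args_v2 v2;
};

static void example_fill_args(union example_args *args, int is_dce4)
{
	args->v1.channel_id = 0;	/* fields common to both layouts */
	args->v1.delay = 10;
	if (is_dce4)
		args->v2.hpd_id = 1;	/* only exists in the v2 layout */
}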
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
/* start training on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
dig_connector->dp_clock, enc_id, 0);
/* set training pattern 1 on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 0);
if (ASIC_IS_DCE4(rdev)) {
/* start training on the source */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
/* set training pattern 1 on the source */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
} else {
/* start training on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
dig_connector->dp_clock, enc_id, 0);
/* set training pattern 1 on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 0);
}
/* set initial vs/emph */
memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 1);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dig_connector->dp_clock, enc_id, 1);
/* channel equalization loop */
tries = 0;
@@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
@@ -30,11 +30,13 @@
#define D1CRTC_CONTROL 0x6080
#define CRTC_EN (1 << 0)
#define D1CRTC_STATUS 0x609c
#define D1CRTC_UPDATE_LOCK 0x60E8
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define D2CRTC_CONTROL 0x6880
#define D2CRTC_STATUS 0x689c
#define D2CRTC_UPDATE_LOCK 0x68E8
#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
This diff has been collapsed.
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef __EVERGREEN_REG_H__
#define __EVERGREEN_REG_H__
/* evergreen */
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
#define EVERGREEN_D3VGA_CONTROL 0x3e0
#define EVERGREEN_D4VGA_CONTROL 0x3e4
#define EVERGREEN_D5VGA_CONTROL 0x3e8
#define EVERGREEN_D6VGA_CONTROL 0x3ec
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
/* 16 BPP */
# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
# define EVERGREEN_GRPH_FORMAT_ARGB565 1
# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
# define EVERGREEN_GRPH_FORMAT_AI88 3
# define EVERGREEN_GRPH_FORMAT_MONO16 4
# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
# define EVERGREEN_GRPH_ENDIAN_8IN16 1
# define EVERGREEN_GRPH_ENDIAN_8IN32 2
# define EVERGREEN_GRPH_ENDIAN_8IN64 3
# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
# define EVERGREEN_GRPH_RED_SEL_R 0
# define EVERGREEN_GRPH_RED_SEL_G 1
# define EVERGREEN_GRPH_RED_SEL_B 2
# define EVERGREEN_GRPH_RED_SEL_A 3
# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
# define EVERGREEN_GRPH_GREEN_SEL_G 0
# define EVERGREEN_GRPH_GREEN_SEL_B 1
# define EVERGREEN_GRPH_GREEN_SEL_A 2
# define EVERGREEN_GRPH_GREEN_SEL_R 3
# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
# define EVERGREEN_GRPH_BLUE_SEL_B 0
# define EVERGREEN_GRPH_BLUE_SEL_A 1
# define EVERGREEN_GRPH_BLUE_SEL_R 2
# define EVERGREEN_GRPH_BLUE_SEL_G 3
# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
# define EVERGREEN_GRPH_ALPHA_SEL_A 0
# define EVERGREEN_GRPH_ALPHA_SEL_R 1
# define EVERGREEN_GRPH_ALPHA_SEL_G 2
# define EVERGREEN_GRPH_ALPHA_SEL_B 3
#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
#define EVERGREEN_GRPH_PITCH 0x6818
#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
#define EVERGREEN_GRPH_X_START 0x682c
#define EVERGREEN_GRPH_Y_START 0x6830
#define EVERGREEN_GRPH_X_END 0x6834
#define EVERGREEN_GRPH_Y_END 0x6838
/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL 0x6998
# define EVERGREEN_CURSOR_EN (1 << 0)
# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
# define EVERGREEN_CURSOR_MONO 0
# define EVERGREEN_CURSOR_24_1 1
# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
# define EVERGREEN_CURSOR_URGENT_1_8 1
# define EVERGREEN_CURSOR_URGENT_1_4 2
# define EVERGREEN_CURSOR_URGENT_3_8 3
# define EVERGREEN_CURSOR_URGENT_1_2 4
#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
#define EVERGREEN_CUR_SIZE 0x69a0
#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
#define EVERGREEN_CUR_POSITION 0x69a8
#define EVERGREEN_CUR_HOT_SPOT 0x69ac
#define EVERGREEN_CUR_COLOR1 0x69b0
#define EVERGREEN_CUR_COLOR2 0x69b4
#define EVERGREEN_CUR_UPDATE 0x69b8
# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
#define EVERGREEN_DC_LUT_CONTROL 0x6a00
#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
#define EVERGREEN_DATA_FORMAT 0x6b00
# define EVERGREEN_INTERLEAVE_EN (1 << 0)
#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
#define EVERGREEN_VIEWPORT_START 0x6d70
#define EVERGREEN_VIEWPORT_SIZE 0x6d74
/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
#endif
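Since every display block (GRPH, CUR, LUT, CRTC) repeats at a fixed stride, the per-CRTC offsets above let one routine program any of the six controllers. A hedged sketch of that addressing pattern; the register and offset names come from this header, WREG32/RREG32 are the driver's usual register accessors, and the helper itself is illustrative:

/* Illustration: enable any evergreen CRTC by adding its block offset
 * to the base register address. */
static const u32 example_crtc_offsets[] = {
	EVERGREEN_CRTC0_REGISTER_OFFSET,
	EVERGREEN_CRTC1_REGISTER_OFFSET,
	EVERGREEN_CRTC2_REGISTER_OFFSET,
	EVERGREEN_CRTC3_REGISTER_OFFSET,
	EVERGREEN_CRTC4_REGISTER_OFFSET,
	EVERGREEN_CRTC5_REGISTER_OFFSET,
};

static void example_crtc_enable(struct radeon_device *rdev, int crtc)
{
	u32 reg = EVERGREEN_CRTC_CONTROL + example_crtc_offsets[crtc];

	WREG32(reg, RREG32(reg) | EVERGREEN_CRTC_MASTER_EN);
}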
@@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
/* set address range for PCI address translate */
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
WREG32(RADEON_AIC_HI_ADDR, tmp);
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 31))) {
if (!(tmp & RADEON_RBBM_ACTIVE)) {
return 0;
}
DRM_UDELAY(1);
@@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(0x0150);
if (tmp & (1 << 2)) {
tmp = RREG32(RADEON_MC_STATUS);
if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
}
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 31)) {
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
@@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
void r100_set_common_regs(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
WREG32(RADEON_DVI_I2C_CNTL_1, 0);
WREG32(RADEON_CAP0_TRIG_CNTL, 0);
WREG32(RADEON_CAP1_TRIG_CNTL, 0);
/* always set up dac2 on rn50 and some rv100 as lots
* of servers seem to wire it up to a VGA port but
* don't report it in the bios connector
* table.
*/
switch (dev->pdev->device) {
/* RN50 */
case 0x515e:
case 0x5969:
force_dac2 = true;
break;
/* RV100*/
case 0x5159:
case 0x515a:
/* DELL triple head servers */
if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
((dev->pdev->subsystem_device == 0x016c) ||
(dev->pdev->subsystem_device == 0x016d) ||
(dev->pdev->subsystem_device == 0x016e) ||
(dev->pdev->subsystem_device == 0x016f) ||
(dev->pdev->subsystem_device == 0x0170) ||
(dev->pdev->subsystem_device == 0x017d) ||
(dev->pdev->subsystem_device == 0x017e) ||
(dev->pdev->subsystem_device == 0x0183) ||
(dev->pdev->subsystem_device == 0x018a) ||
(dev->pdev->subsystem_device == 0x019a)))
force_dac2 = true;
break;
}
if (force_dac2) {
u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
/* For CRT on DAC2, don't turn it on if BIOS didn't
enable it, even if it's detected.
*/
/* force it to crtc0 */
dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
/* set up the TV DAC */
tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
RADEON_TV_DAC_STD_MASK |
RADEON_TV_DAC_RDACPD |
RADEON_TV_DAC_GDACPD |
RADEON_TV_DAC_BDACPD |
RADEON_TV_DAC_BGADJ_MASK |
RADEON_TV_DAC_DACADJ_MASK);
tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
RADEON_TV_DAC_NHOLD |
RADEON_TV_DAC_STD_PS2 |
(0x58 << 16));
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
}
}
/*
@@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
void r100_vram_init_sizes(struct radeon_device *rdev)
{
u64 config_aper_size;
u32 accessible;
/* work out accessible VRAM */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
/* read NB_TOM to get the amount of ram stolen for the GPU */
tom = RREG32(RADEON_NB_TOM);
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
/* for IGPs we need to keep VRAM where it was put by the BIOS */
rdev->mc.vram_location = (tom & 0xffff) << 16;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
} else {
......@@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
/* let driver place VRAM */
rdev->mc.vram_location = 0xFFFFFFFFUL;
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones */
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882 + along with lots of ubuntu ones
*/
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
/* work out accessible VRAM */
accessible = r100_get_accessible_vram(rdev);
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
if (accessible > rdev->mc.aper_size)
accessible = rdev->mc.aper_size;
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
WREG32(RADEON_CONFIG_CNTL, temp);
}
void r100_vram_info(struct radeon_device *rdev)
void r100_mc_init(struct radeon_device *rdev)
{
r100_vram_get_type(rdev);
u64 base;
r100_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
base = rdev->mc.aper_base;
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
}
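The reworked flow makes the placement policy explicit: IGPs keep VRAM where the BIOS carved it out (read back from NB_TOM), discrete parts anchor it at the aperture base, and the GTT is placed by a generic helper unless AGP dictates its own window. The helper bodies are not part of this diff; the following is a rough sketch of what such placement amounts to, offered as an assumption rather than the actual radeon_vram_location/radeon_gtt_location code (the struct radeon_mc fields are the ones this diff already uses):

/* Assumed behaviour, for illustration: carve non-overlapping GPU
 * address ranges for VRAM and GTT out of the address space. */
static void sketch_vram_location(struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	mc->vram_end = base + mc->mc_vram_size - 1;
}

static void sketch_gtt_location(struct radeon_mc *mc)
{
	/* Place the GTT window right after VRAM. */
	mc->gtt_start = mc->vram_end + 1;
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}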
@@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Update base address for crtc */
WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
rdev->mc.vram_location);
WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
}
/* Restore CRTC registers */
WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
int r100_mc_init(struct radeon_device *rdev)
{
int r;
u32 tmp;
/* Setup GPU memory space */
rdev->mc.vram_location = 0xFFFFFFFFUL;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
if (rdev->flags & RADEON_IS_IGP) {
tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
rdev->mc.vram_location = tmp << 16;
}
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
}
r = radeon_mc_setup(rdev);
if (r)
return r;
return 0;
}
int r100_init(struct radeon_device *rdev)
{
int r;
@@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r100_mc_init(rdev);
if (r)
return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
}
}
/* initialize VRAM */
r100_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -31,6 +31,7 @@
#include "radeon_reg.h"
#include "radeon.h"
#include "r100d.h"
#include "r200_reg_safe.h"
#include "r100_track.h"
@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
return vtx_size;
}
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence)
{
uint32_t size;
uint32_t cur_size;
int i, num_loops;
int r = 0;
/* radeon pitch is /64 */
size = num_pages << PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(rdev, PACKET0(0x720, 2));
radeon_ring_write(rdev, src_offset);
radeon_ring_write(rdev, dst_offset);
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev);
return r;
}
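Worth noting for readers of the loop above: each blit descriptor moves at most 0x1FFFFF bytes (2 MiB minus one), which is why the byte count is split with DIV_ROUND_UP. Copying 16 MiB (0x1000000 bytes), for example, takes DIV_ROUND_UP(0x1000000, 0x1FFFFF) = 9 iterations; the ring reservation of num_loops * 4 + 64 covers the four dwords written per iteration plus slack for the surrounding wait and fence packets.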
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
table_addr = rdev->gart.table_addr;
WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
/* FIXME: setup default page */
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
WREG32_PCIE(0x18, 0);
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
radeon_ring_write(rdev, PACKET0(0x43E0, 0));
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(0x43E4, 0));
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
/* Flush 3D cache */
radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
radeon_ring_write(rdev, (2 << 0));
radeon_ring_write(rdev, PACKET0(0x4F18, 0));
radeon_ring_write(rdev, (1 << 0));
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
int r300_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence)
{
uint32_t size;
uint32_t cur_size;
int i, num_loops;
int r = 0;
/* radeon pitch is /64 */
size = num_pages << PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
radeon_ring_write(rdev, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(rdev, PACKET0(0x720, 2));
radeon_ring_write(rdev, src_offset);
radeon_ring_write(rdev, dst_offset);
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev);
return r;
}
void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(0x170C, 0));
radeon_ring_write(rdev, 1 << 31);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(0x0150);
if (tmp & (1 << 4)) {
tmp = RREG32(RADEON_MC_STATUS);
if (tmp & R300_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
tmp = RREG32(0x170C);
WREG32(0x170C, tmp | (1 << 31));
tmp = RREG32(R300_DST_PIPE_CONFIG);
WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
WREG32(0x43E0, 0);
WREG32(0x43E4, 0);
WREG32(R300_RE_SCISSORS_TL, 0);
WREG32(R300_RE_SCISSORS_BR, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
}
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 31)) {
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
/*
* r300,r350,rv350,rv380 VRAM info
*/
void r300_vram_info(struct radeon_device *rdev)
void r300_mc_init(struct radeon_device *rdev)
{
uint32_t tmp;
u64 base;
u32 tmp;
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
tmp = RREG32(RADEON_MEM_CNTL);
tmp &= R300_MEM_NUM_CHANNELS_MASK;
switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
case 2: rdev->mc.vram_width = 256; break;
default: rdev->mc.vram_width = 128; break;
}
r100_vram_init_sizes(rdev);
base = rdev->mc.aper_base;
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
}
int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
u32 link_width_cntl;
if (rdev->flags & RADEON_IS_IGP)
return 0;
if (!(rdev->flags & RADEON_IS_PCIE))
return 0;
/* FIXME wait for idle */
if (rdev->family < CHIP_R600)
link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
else
link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
case RADEON_PCIE_LC_LINK_WIDTH_X0:
return 0;
case RADEON_PCIE_LC_LINK_WIDTH_X1:
return 1;
case RADEON_PCIE_LC_LINK_WIDTH_X2:
return 2;
case RADEON_PCIE_LC_LINK_WIDTH_X4:
return 4;
case RADEON_PCIE_LC_LINK_WIDTH_X8:
return 8;
case RADEON_PCIE_LC_LINK_WIDTH_X16:
default:
return 16;
}
}
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;;
tile_flags |= R300_DEPTHMICROTILE_TILED;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
if (r)
return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
}
}
/* initialize memory controller */
r300_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -33,6 +33,7 @@
#include "drmP.h"
#include "drm.h"
#include "drm_buffer.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
int reg;
int sz;
int i;
int values[64];
u32 *value;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if ((sz > 64) || (sz < 0)) {
DRM_ERROR
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
reg, sz);
DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
reg, sz);
return -EINVAL;
}
for (i = 0; i < sz; i++) {
values[i] = ((int *)cmdbuf->buf)[i];
switch (r300_reg_flags[(reg >> 2) + i]) {
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
if (!radeon_check_offset(dev_priv, (u32) values[i])) {
DRM_ERROR
("Offset failed range check (reg=%04x sz=%d)\n",
reg, sz);
value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
if (!radeon_check_offset(dev_priv, *value)) {
DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
reg, sz);
return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return -EINVAL;
}
}
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
OUT_RING_TABLE(values, sz);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
cmdbuf->buf += sz * 4;
cmdbuf->bufsz -= sz * 4;
return 0;
}
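The conversion above replaces raw pointer arithmetic on a flat cmdbuf->buf with accessors on the multipart buffer. drm_buffer_pointer_to_dword itself lives in drm_buffer.h and is not part of this hunk; the following is a plausible sketch of what such an accessor must do, offered as an assumption rather than the header's actual code:

/* Assumed shape: index dwords relative to the current iterator and
 * resolve which backing page holds them. Because PAGE_SIZE is a
 * multiple of 4, an aligned dword never straddles two pages. */
static inline void *sketch_pointer_to_dword(struct drm_buffer *buf, int idx)
{
	int iter = buf->iterator + idx * 4;

	return &buf->data[iter / PAGE_SIZE][iter % PAGE_SIZE];
}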
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
if (sz * 4 > cmdbuf->bufsz)
if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
cmdbuf->buf += sz * 4;
cmdbuf->bufsz -= sz * 4;
return 0;
}
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
if (sz * 16 > cmdbuf->bufsz)
if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
OUT_RING(0);
ADVANCE_RING();
cmdbuf->buf += sz * 16;
cmdbuf->bufsz -= sz * 16;
return 0;
}
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
{
RING_LOCALS;
if (8 * 4 > cmdbuf->bufsz)
if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
ADVANCE_RING();
BEGIN_RING(4);
......@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
cmdbuf->buf += 8 * 4;
cmdbuf->bufsz -= 8 * 4;
return 0;
}
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
{
int count, i, k;
#define MAX_ARRAY_PACKET 64
u32 payload[MAX_ARRAY_PACKET];
u32 *data;
u32 narrays;
RING_LOCALS;
count = (header >> 16) & 0x3fff;
count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return -EINVAL;
}
memset(payload, 0, MAX_ARRAY_PACKET * 4);
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
/* carefully check packet contents */
narrays = payload[0];
/* We have already read the header so advance the buffer. */
drm_buffer_advance(cmdbuf->buffer, 4);
narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
k = 0;
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
if (!radeon_check_offset(dev_priv, payload[i])) {
data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
......@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
if (k == narrays)
break;
/* have one more to process, they come in pairs */
if (!radeon_check_offset(dev_priv, payload[i])) {
data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
BEGIN_RING(count + 2);
OUT_RING(header);
OUT_RING_TABLE(payload, count + 1);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd = (u32 *) cmdbuf->buf;
u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
int count, ret;
RING_LOCALS;
count=(cmd[0]>>16) & 0x3fff;
if (cmd[0] & 0x8000) {
u32 offset;
count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
if (*cmd & 0x8000) {
u32 offset;
u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
offset = *cmd2 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
}
if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[3] << 10;
if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
offset = *cmd3 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
return 0;
}
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd;
u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
int count;
int expected_count;
RING_LOCALS;
cmd = (u32 *) cmdbuf->buf;
count = (cmd[0]>>16) & 0x3fff;
expected_count = cmd[1] >> 16;
if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
expected_count = *cmd1 >> 16;
if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count+1)/2;
if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
if (!count) {
drm_r300_cmd_header_t header;
drm_r300_cmd_header_t stack_header, *header;
u32 *cmd1, *cmd2, *cmd3;
if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
if (drm_buffer_unprocessed(cmdbuf->buffer)
< 4*4 + sizeof(stack_header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
header.u = *(unsigned int *)cmdbuf->buf;
header = drm_buffer_read_object(cmdbuf->buffer,
sizeof(stack_header), &stack_header);
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
cmd = (u32 *) cmdbuf->buf;
cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
if (header.header.cmd_type != R300_CMD_PACKET3 ||
header.packet3.packet != R300_CMD_PACKET3_RAW ||
cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
if (header->header.cmd_type != R300_CMD_PACKET3 ||
header->packet3.packet != R300_CMD_PACKET3_RAW ||
*cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
if ((*cmd1 & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n",
*cmd1);
return -EINVAL;
}
if (!radeon_check_offset(dev_priv, cmd[2])) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
if (!radeon_check_offset(dev_priv, *cmd2)) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n",
*cmd2);
return -EINVAL;
}
if (cmd[3] != expected_count) {
if (*cmd3 != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
cmd[3], expected_count);
*cmd3, expected_count);
return -EINVAL;
}
BEGIN_RING(4);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
ADVANCE_RING();
cmdbuf->buf += 4*4;
cmdbuf->bufsz -= 4*4;
}
return 0;
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 header;
u32 *header;
int count;
RING_LOCALS;
if (4 > cmdbuf->bufsz)
if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
header = *(u32 *) cmdbuf->buf;
header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
/* Is it packet 3 ? */
if ((header >> 30) != 0x3) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
if ((*header >> 30) != 0x3) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
count = (header >> 16) & 0x3fff;
count = (*header >> 16) & 0x3fff;
/* Check again now that we know how much data to expect */
if ((count + 2) * 4 > cmdbuf->bufsz) {
if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
(count + 2) * 4, cmdbuf->bufsz);
(count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
return -EINVAL;
}
/* Is it a packet type we know about ? */
switch (header & 0xff00) {
switch (*header & 0xff00) {
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
/* these packets are safe */
break;
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
BEGIN_RING(count + 2);
OUT_RING(header);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
cmdbuf->buf += (count + 2) * 4;
cmdbuf->bufsz -= (count + 2) * 4;
return 0;
}
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
{
int n;
int ret;
char *orig_buf = cmdbuf->buf;
int orig_bufsz = cmdbuf->bufsz;
int orig_iter = cmdbuf->buffer->iterator;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
if (ret)
return ret;
cmdbuf->buf = orig_buf;
cmdbuf->bufsz = orig_bufsz;
cmdbuf->buffer->iterator = orig_iter;
}
switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
break;
default:
DRM_ERROR("bad packet3 type %i at %p\n",
DRM_ERROR("bad packet3 type %i at byte %d\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
cmdbuf->buffer->iterator - sizeof(header));
return -EINVAL;
}
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_r300_cmd_header_t header)
{
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
u64 ptr_addr;
u32 i, *buf_idx, h_pending;
u64 *ptr_addr;
u64 stack_ptr_addr;
RING_LOCALS;
if (cmdbuf->bufsz <
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
if (drm_buffer_unprocessed(cmdbuf->buffer) <
(sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
return -EINVAL;
}
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
dev_priv->scratch_ages[header.scratch.reg]++;
ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
ref_age_base = (u32 *)(unsigned long)ptr_addr;
cmdbuf->buf += sizeof(u64);
cmdbuf->bufsz -= sizeof(u64);
ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
sizeof(stack_ptr_addr), &stack_ptr_addr);
ref_age_base = (u32 *)(unsigned long)*ptr_addr;
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
*buf_idx *= 2; /* 8 bytes per buf */
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
&dev_priv->scratch_ages[header.scratch.reg],
sizeof(u32)))
return -EINVAL;
}
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
if (DRM_COPY_FROM_USER(&h_pending,
ref_age_base + *buf_idx + 1,
sizeof(u32)))
return -EINVAL;
}
if (h_pending == 0) {
if (h_pending == 0)
return -EINVAL;
}
h_pending--;
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
&h_pending,
sizeof(u32)))
return -EINVAL;
}
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
}
BEGIN_RING(2);
......@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
if (!sz)
return 0;
if (sz * stride * 4 > cmdbuf->bufsz)
if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(3 + sz * stride);
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
ADVANCE_RING();
cmdbuf->buf += sz * stride * 4;
cmdbuf->bufsz -= sz * stride * 4;
return 0;
}
......@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
while (drm_buffer_unprocessed(cmdbuf->buffer)
>= sizeof(drm_r300_cmd_header_t)) {
int idx;
drm_r300_cmd_header_t header;
header.u = *(unsigned int *)cmdbuf->buf;
drm_r300_cmd_header_t *header, stack_header;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
header = drm_buffer_read_object(cmdbuf->buffer,
sizeof(stack_header), &stack_header);
switch (header.header.cmd_type) {
switch (header->header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
......@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
ret = r300_emit_vpu(dev_priv, cmdbuf, header);
ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
......@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
ret = r300_emit_packet3(dev_priv, cmdbuf, header);
ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
......@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
int i;
RING_LOCALS;
BEGIN_RING(header.delay.count);
for (i = 0; i < header.delay.count; i++)
BEGIN_RING(header->delay.count);
for (i = 0; i < header->delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
......@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
idx = header.dma.buf_idx;
idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
......@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_WAIT:
DRM_DEBUG("R300_CMD_WAIT\n");
r300_cmd_wait(dev_priv, header);
r300_cmd_wait(dev_priv, *header);
break;
case R300_CMD_SCRATCH:
DRM_DEBUG("R300_CMD_SCRATCH\n");
ret = r300_scratch(dev_priv, cmdbuf, header);
ret = r300_scratch(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_scratch failed\n");
goto cleanup;
......@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
DRM_DEBUG("R300_CMD_R500FP\n");
ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_r500fp failed\n");
goto cleanup;
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
DRM_ERROR("bad cmd_type %i at byte %d\n",
header->header.cmd_type,
cmdbuf->buffer->iterator - sizeof(*header));
ret = -EINVAL;
goto cleanup;
}
......
......@@ -952,6 +952,7 @@
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_MACRO_TILE (1 << 2)
# define R300_TXO_MICRO_TILE (1 << 3)
# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END: Guess from R200 */
......@@ -1360,6 +1361,7 @@
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
......
......@@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
int r420_mc_init(struct radeon_device *rdev)
{
int r;
/* Setup GPU memory space */
rdev->mc.vram_location = 0xFFFFFFFFUL;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
}
r = radeon_mc_setup(rdev);
if (r) {
return r;
}
return 0;
}
void r420_pipes_init(struct radeon_device *rdev)
{
unsigned tmp;
......@@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
unsigned num_pipes;
	/* GA_ENHANCE: work around TCL deadlock issue */
WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
(1 << 2) | (1 << 3));
/* add idle wait as per freedesktop.org bug 24041 */
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
......@@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
tmp = (7 << 1);
break;
}
WREG32(0x42C8, (1 << num_pipes) - 1);
WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
tmp |= (1 << 4) | (1 << 0);
WREG32(0x4018, tmp);
tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
WREG32(R300_GB_TILE_CONFIG, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
tmp = RREG32(0x170C);
WREG32(0x170C, tmp | (1 << 31));
tmp = RREG32(R300_DST_PIPE_CONFIG);
WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
RREG32(R300_RB2D_DSTCACHE_MODE) |
......@@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
	/* Get vram information */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
if (r) {
return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
}
}
/* initialize memory controller */
r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
......
......@@ -717,54 +717,62 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
#define AVIVO_DC_GPIO_HPD_A 0x7e94
#define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50
#define AVIVO_GPIO_3 0x7e60
#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
#define AVIVO_I2C_STATUS 0x7d30
# define AVIVO_I2C_STATUS_DONE (1 << 0)
# define AVIVO_I2C_STATUS_NACK (1 << 1)
# define AVIVO_I2C_STATUS_HALT (1 << 2)
# define AVIVO_I2C_STATUS_GO (1 << 3)
# define AVIVO_I2C_STATUS_MASK 0x7
/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
* DONE? */
# define AVIVO_I2C_STATUS_CMD_RESET 0x7
# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
#define AVIVO_I2C_STOP 0x7d34
#define AVIVO_I2C_START_CNTL 0x7d38
# define AVIVO_I2C_START (1 << 8)
# define AVIVO_I2C_CONNECTOR0 (0 << 16)
# define AVIVO_I2C_CONNECTOR1 (1 << 16)
#define R520_I2C_START (1<<0)
#define R520_I2C_STOP (1<<1)
#define R520_I2C_RX (1<<2)
#define R520_I2C_EN (1<<8)
#define R520_I2C_DDC1 (0<<16)
#define R520_I2C_DDC2 (1<<16)
#define R520_I2C_DDC3 (2<<16)
#define R520_I2C_DDC_MASK (3<<16)
#define AVIVO_I2C_CONTROL2 0x7d3c
# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
#define AVIVO_I2C_CONTROL3 0x7d40
/* Reading is done 4 bytes at a time: read the bottom 8 bits from
* 7d44, four times in a row.
* Writing is a little more complex. First write DATA with
* 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
* magic number, zz is, I think, the slave address, and yy is the byte
* you want to write. */
#define AVIVO_I2C_DATA 0x7d44
#define R520_I2C_ADDR_COUNT_MASK (0x7)
#define R520_I2C_DATA_COUNT_SHIFT (8)
#define R520_I2C_DATA_COUNT_MASK (0xF00)
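/* Illustrative sketch (not part of this patch) of the read sequence the
 * comment above describes, assuming the usual RREG32()/WREG32() register
 * accessors; the polling on AVIVO_I2C_STATUS is an assumption based on
 * the status bits defined above. */
static inline int avivo_i2c_read_bytes_sketch(struct radeon_device *rdev,
					      u8 *buf, int len)
{
	int i;

	/* wait for the transfer started via AVIVO_I2C_START_CNTL to finish */
	while (RREG32(AVIVO_I2C_STATUS) & AVIVO_I2C_STATUS_GO)
		;
	if (RREG32(AVIVO_I2C_STATUS) & AVIVO_I2C_STATUS_NACK)
		return -EIO;

	/* each read of AVIVO_I2C_DATA yields one byte in the low 8 bits */
	for (i = 0; i < len; i++)
		buf[i] = RREG32(AVIVO_I2C_DATA) & 0xff;

	return 0;
}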
#define AVIVO_I2C_CNTL 0x7d50
# define AVIVO_I2C_EN (1 << 0)
# define AVIVO_I2C_RESET (1 << 8)
#define AVIVO_DC_I2C_STATUS1 0x7d30
# define AVIVO_DC_I2C_DONE (1 << 0)
# define AVIVO_DC_I2C_NACK (1 << 1)
# define AVIVO_DC_I2C_HALT (1 << 2)
# define AVIVO_DC_I2C_GO (1 << 3)
#define AVIVO_DC_I2C_RESET 0x7d34
# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
# define AVIVO_DC_I2C_ABORT (1 << 8)
#define AVIVO_DC_I2C_CONTROL1 0x7d38
# define AVIVO_DC_I2C_START (1 << 0)
# define AVIVO_DC_I2C_STOP (1 << 1)
# define AVIVO_DC_I2C_RECEIVE (1 << 2)
# define AVIVO_DC_I2C_EN (1 << 8)
# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
# define AVIVO_SEL_DDC1 0
# define AVIVO_SEL_DDC2 1
# define AVIVO_SEL_DDC3 2
#define AVIVO_DC_I2C_CONTROL2 0x7d3c
# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
#define AVIVO_DC_I2C_CONTROL3 0x7d40
# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
#define AVIVO_DC_I2C_DATA 0x7d44
#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
#define AVIVO_DC_I2C_ARBITRATION 0x7d50
# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
#define AVIVO_DC_GPIO_DDC1_A 0x7e44
#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
#define AVIVO_DC_GPIO_DDC2_A 0x7e54
#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
#define AVIVO_DC_GPIO_DDC3_A 0x7e64
#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
......
......@@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
rdev->mc.vram_width *= 2;
}
void r520_vram_info(struct radeon_device *rdev)
void r520_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
r520_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
......@@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
	/* Get vram information */
r520_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
r = r420_mc_init(rdev);
if (r)
return r;
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
radeon_agp_disable(rdev);
}
}
/* initialize memory controller */
r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
......
......@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
/*
* R600 PCIE GART
*/
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u64 pte;
if (i < 0 || i > rdev->gart.num_gpu_pages)
return -EINVAL;
pte = 0;
writeq(pte, ((void __iomem *)ptr) + (i * 8));
return 0;
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
/* flush hdp cache so updates hit vram */
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
......@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
......@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
/**
* r600_vram_gtt_location - try to find VRAM & GTT location
* @rdev: radeon device structure holding all necessary information
* @mc: memory controller structure holding memory information
*
* Function will try to place VRAM at the same place as in the CPU (PCI)
* address space, as some GPUs seem to have issues when we reprogram the
* MC at a different address.
*
* If there is not enough space to fit the invisible VRAM after the
* aperture, then we limit the VRAM size to the aperture.
*
* If we are using AGP, then place VRAM adjacent to the AGP aperture, as
* we need them to appear as one aperture from the GPU's point of view so
* that we can program the GPU to catch accesses outside of them (weird
* GPU policy, see ??).
*
* This function never fails; in the worst case we limit VRAM or GTT.
*
* Note: GTT start, end and size should be initialized before calling this
* function on an AGP platform.
*/
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
if (mc->mc_vram_size > 0xE0000000) {
/* leave room for at least 512M GTT */
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xE0000000;
mc->mc_vram_size = 0xE0000000;
}
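	/* Note: 0xE0000000 is 3.5G, so this cap keeps 0x20000000 (512M) of
	 * the 4G GPU address space free for the GTT, per the comment above. */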
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = size_bf;
mc->mc_vram_size = size_bf;
}
mc->vram_start = mc->gtt_start - mc->mc_vram_size;
} else {
if (mc->mc_vram_size > size_af) {
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
if (rdev->flags & RADEON_IS_IGP)
base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, mc);
}
}
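/* Worked example with illustrative numbers: for an AGP GTT at
 * gtt_start = 0xD0000000 and gtt_end = 0xDFFFFFFF, size_bf = 0xD0000000
 * (~3.25G before the GTT) and size_af = 0xFFFFFFFF - 0xDFFFFFFF + 1 =
 * 0x20000000 (512M after it); size_bf > size_af, so a 1G VRAM is placed
 * before the GTT at vram_start = 0xD0000000 - 0x40000000 = 0x90000000. */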
int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
......@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
* AGP so that GPU can catch out of VRAM/AGP access
*/
if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
/* Enough place before */
rdev->mc.vram_location = rdev->mc.gtt_location -
rdev->mc.mc_vram_size;
} else if (tmp > rdev->mc.mc_vram_size) {
/* Enough place after */
rdev->mc.vram_location = rdev->mc.gtt_location +
rdev->mc.gtt_size;
} else {
			/* Try to setup VRAM then AGP; this might
			 * not work on some cards
*/
rdev->mc.vram_location = 0x00000000UL;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
0xFFFF) << 24;
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
/* Enough place after vram */
rdev->mc.gtt_location = tmp;
} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
/* Enough place before vram */
rdev->mc.gtt_location = 0;
} else {
			/* Not enough room after or before; shrink
			 * gart size
*/
if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
rdev->mc.gtt_location = 0;
rdev->mc.gtt_size = rdev->mc.vram_location;
} else {
rdev->mc.gtt_location = tmp;
rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
}
}
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
rdev->mc.vram_start = rdev->mc.vram_location;
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
return 0;
}
......@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
u32 sq_config;
......@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
default:
break;
}
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
......@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp);
}
tiling_config |= BANK_SWAPS(1);
tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
rdev->config.r600.max_backends,
(0xff << rdev->config.r600.max_backends) & 0xff);
tiling_config |= BACKEND_MAP(tmp);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
WREG32(CC_RB_BACKEND_DISABLE, tmp);
/* Setup pipes */
tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
......@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP. It handles the interrupts + timestamps + events */
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* wait for 3D idle clean */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
......@@ -2745,6 +2765,7 @@ int r600_irq_process(struct radeon_device *rdev)
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
......@@ -2765,6 +2786,7 @@ int r600_irq_process(struct radeon_device *rdev)
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
......
......@@ -146,6 +146,15 @@ static void r600_audio_update_hdmi(unsigned long param)
jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
}
/*
* turn on/off audio engine
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
}
/*
* initialize the audio vars and register the update timer
*/
......@@ -154,8 +163,7 @@ int r600_audio_init(struct radeon_device *rdev)
if (!r600_audio_chipset_supported(rdev))
return 0;
DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
r600_audio_engine_enable(rdev, radeon_audio);
rdev->audio_channels = -1;
rdev->audio_rate = -1;
......@@ -263,4 +271,6 @@ void r600_audio_fini(struct radeon_device *rdev)
del_timer(&rdev->audio_timer);
WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
r600_audio_engine_enable(rdev, false);
}
......@@ -403,8 +403,6 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
......@@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
ring_size += 7; /* fence emit for VB IB */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 7; /* fence emit for done copy */
ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
......@@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
int r;
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* wait for 3D idle clean */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
......
......@@ -9,11 +9,6 @@ const u32 r6xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
0xc0004600,
0x00000016,
0xc0016800,
0x00000010,
0x00028000,
0xc0016800,
0x00000010,
0x00008000,
......@@ -531,11 +526,6 @@ const u32 r7xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
0xc0004600,
0x00000016,
0xc0016800,
0x00000010,
0x00028000,
0xc0016800,
0x00000010,
0x00008000,
......
......@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
u32 hdp_host_path_cntl;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 ramcfg;
/* setup chip specs */
......@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
dev_priv->r600_max_backends,
(0xff << dev_priv->r600_max_backends) & 0xff);
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
cc_gc_shader_pipe_config =
cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
cc_rb_backend_disable =
R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
if (gb_tiling_config & 0xc0) {
dev_priv->r600_group_size = 512;
} else {
dev_priv->r600_group_size = 256;
}
dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
if (gb_tiling_config & 0x30) {
dev_priv->r600_nbanks = 8;
} else {
dev_priv->r600_nbanks = 4;
}
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
num_qd_pipes =
R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
......@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
}
static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
......@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
......@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
......@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
}
break;
case 4:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
}
break;
case 5:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
}
break;
case 6:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
}
break;
case 7:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
}
break;
case 8:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
}
break;
}
......@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
drm_radeon_private_t *dev_priv)
{
int i, j, num_qd_pipes;
u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
......@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 mc_arb_ramcfg;
u32 db_debug4;
......@@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
dev_priv->r600_max_backends,
(0xff << dev_priv->r600_max_backends) & 0xff);
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
cc_gc_shader_pipe_config =
cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
cc_rb_backend_disable =
R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
dev_priv->r600_max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
if (gb_tiling_config & 0xc0) {
dev_priv->r600_group_size = 512;
} else {
dev_priv->r600_group_size = 256;
}
dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
if (gb_tiling_config & 0x30) {
dev_priv->r600_nbanks = 8;
} else {
dev_priv->r600_nbanks = 4;
}
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
......@@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev,
RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
R600_SYNC_GRADIENT |
R600_SYNC_WALKER |
R600_SYNC_ALIGNER));
ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
......@@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev,
smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
R700_GS_FLUSH_CTL(4) |
R700_ACK_FLUSH_CTL(3) |
R700_SYNC_FLUSH_CTL));
if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
R700_GS_FLUSH_CTL(4) |
R700_ACK_FLUSH_CTL(3) |
R700_SYNC_FLUSH_CTL));
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f));
else {
db_debug3 = RADEON_READ(R700_DB_DEBUG3);
db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
case CHIP_RV740:
db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
break;
case CHIP_RV710:
case CHIP_RV730:
default:
db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
break;
}
RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
......@@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev,
R600_ALU_UPDATE_FIFO_HIWATER(0x8));
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV730:
case CHIP_RV710:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
......@@ -2529,3 +2636,12 @@ int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fp
mutex_unlock(&dev_priv->cs_mutex);
return r;
}
void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
{
struct drm_radeon_private *dev_priv = dev->dev_private;
*npipes = dev_priv->r600_npipes;
*nbanks = dev_priv->r600_nbanks;
*group_size = dev_priv->r600_group_size;
}
......@@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
rdev->mc.gtt_start = rdev->mc.agp_base;
rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
/* workaround some hw issues */
if (rdev->family < CHIP_R200) {
......
......@@ -411,6 +411,12 @@ bool radeon_get_bios(struct radeon_device *rdev)
goto free_bios;
}
tmp = RBIOS16(0x18);
if (RBIOS8(tmp + 0x14) != 0x0) {
DRM_INFO("Not an x86 BIOS ROM, not using.\n");
goto free_bios;
}
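	/* RBIOS16(0x18) is the offset of the PCI Data Structure within the
	 * ROM; the byte at offset 0x14 inside that structure is the Code Type
	 * field (0x00 = x86 BIOS image, 0x03 = EFI), which is what the check
	 * above rejects for non-x86 images. */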
rdev->bios_header_start = RBIOS16(0x48);
if (!rdev->bios_header_start) {
goto free_bios;
......
......@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
int ret;
......@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
p2pll->max_frac_feedback_div = 0;
}
/* dcpll is DCE4 only */
dcpll->min_post_div = 2;
dcpll->max_post_div = 0x7f;
dcpll->min_frac_feedback_div = 0;
dcpll->max_frac_feedback_div = 9;
dcpll->min_ref_div = 2;
dcpll->max_ref_div = 0x3ff;
dcpll->min_feedback_div = 4;
dcpll->max_feedback_div = 0xfff;
dcpll->best_vco = 0;
p1pll->min_ref_div = 2;
p1pll->max_ref_div = 0x3ff;
p1pll->min_feedback_div = 4;
......@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
/* XXX make sure engine is idle */
if (radeon_dynclks != -1) {
if (radeon_dynclks)
radeon_set_clock_gating(rdev, 1);
if (radeon_dynclks) {
if (rdev->asic->set_clock_gating)
radeon_set_clock_gating(rdev, 1);
}
}
radeon_apply_clock_quirks(rdev);
return 0;
......
......@@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
......@@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus) {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
......@@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
if (radeon_connector->ddc_bus) {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
......@@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
......@@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
ret = connector_status_connected;
}
} else {
radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_ddc_probe(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
}
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
return ret;
......
......@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
else
cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
} else if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
......@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
......@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
......@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
} else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
......@@ -197,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
yorigin = CURSOR_HEIGHT - 1;
radeon_lock_cursor(crtc, true);
if (ASIC_IS_AVIVO(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
/* cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
/* XXX: check if evergreen has the same issues as avivo chips */
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
((xorigin ? 0 : x) << 16) |
(yorigin ? 0 : y));
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else if (ASIC_IS_AVIVO(rdev)) {
int w = radeon_crtc->cursor_width;
int i = 0;
struct drm_crtc *crtc_p;
......
......@@ -86,7 +86,8 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = 1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
......@@ -122,9 +123,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
module_param_named(dynpm, radeon_dynpm, int, 0444);
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
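/* Usage note: when built as a module these can be set with e.g.
 * "options radeon dynpm=1 new_pll=-1" in modprobe.conf (or
 * radeon.dynpm=1 on the kernel command line); -1 leaves the tri-state
 * options at the per-chip default. */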
......
......@@ -295,6 +295,9 @@ typedef struct drm_radeon_private {
int r700_sc_prim_fifo_size;
int r700_sc_hiz_tile_fifo_size;
int r700_sc_earlyz_tile_fifo_fize;
int r600_group_size;
int r600_npipes;
int r600_nbanks;
struct mutex cs_mutex;
u32 cs_id_scnt;
......@@ -310,9 +313,11 @@ typedef struct drm_radeon_buf_priv {
u32 age;
} drm_radeon_buf_priv_t;
struct drm_buffer;
typedef struct drm_radeon_kcmd_buffer {
int bufsz;
char *buf;
struct drm_buffer *buffer;
int nbox;
struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
......@@ -2122,4 +2127,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
write &= mask; \
} while (0)
/**
* Copy given number of dwords from drm buffer to the ring buffer.
*/
#define OUT_RING_DRM_BUFFER(buf, sz) do { \
int _size = (sz) * 4; \
struct drm_buffer *_buf = (buf); \
int _part_size; \
while (_size > 0) { \
_part_size = _size; \
\
if (write + _part_size/4 > mask) \
_part_size = ((mask + 1) - write)*4; \
\
if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
_part_size = PAGE_SIZE - drm_buffer_index(_buf);\
\
\
\
memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
[drm_buffer_index(_buf)], _part_size); \
\
_size -= _part_size; \
write = (write + _part_size/4) & mask; \
drm_buffer_advance(_buf, _part_size); \
} \
} while (0)
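/*
 * Usage sketch (mirroring r300_emit_raw_packet3 in r300_cmdbuf.c): inside
 * a function that has declared RING_LOCALS, the macro streams dwords from
 * the multipart drm_buffer into the ring, splitting each copy at ring
 * wrap-around and at drm_buffer page boundaries, and advances the buffer
 * iterator as it goes:
 *
 *	BEGIN_RING(count + 2);
 *	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
 *	ADVANCE_RING();
 */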
#endif /* __RADEON_DRV_H__ */