Commit 5b2eef96 authored by Linus Torvalds

Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (390 commits)
  drm/radeon/kms: disable underscan by default
  drm/radeon/kms: only enable hdmi features if the monitor supports audio
  drm: Restore the old_fb upon modeset failure
  drm/nouveau: fix hwmon device binding
  radeon: consolidate asic-specific function decls for pre-r600
  vga_switcheroo: comparing too few characters in strncmp()
  drm/radeon/kms: add NI pci ids
  drm/radeon/kms: don't enable pcie gen2 on NI yet
  drm/radeon/kms: add radeon_asic struct for NI asics
  drm/radeon/kms/ni: load default sclk/mclk/vddc at pm init
  drm/radeon/kms: add ucode loader for NI
  drm/radeon/kms: add support for DCE5 display LUTs
  drm/radeon/kms: add ni_reg.h
  drm/radeon/kms: add bo blit support for NI
  drm/radeon/kms: always use writeback/events for fences on NI
  drm/radeon/kms: adjust default clock/vddc tracking for pm on DCE5
  drm/radeon/kms: add backend map workaround for barts
  drm/radeon/kms: fill gpu init for NI asics
  drm/radeon/kms: add disabled vbios accessor for NI asics
  drm/radeon/kms: handle NI thermal controller
  ...
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
void (*agp_destroy_page)(struct page *, int flags);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
void (*chipset_flush)(struct agp_bridge_data *);
};
struct agp_bridge_data {
...
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case AGPIOC_CHIPSET_FLUSH32:
ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
...
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
#endif /* _AGP_COMPAT_H */
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
return agp_unbind_memory(memory);
}
int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
{
DBG("");
agp_flush_chipset(agp_bridge);
return 0;
}
static long agp_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
break;
case AGPIOC_CHIPSET_FLUSH:
ret_val = agpioc_chipset_flush_wrap(curr_priv);
break;
}
...
@@ -81,13 +81,6 @@ static int agp_get_key(void)
return -1;
}
void agp_flush_chipset(struct agp_bridge_data *bridge)
{
if (bridge->driver->chipset_flush)
bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
}
EXPORT_SYMBOL(agp_unbind_memory);
/**
* agp_rebind_memory - Rewrite the entire GATT, useful on resume
*/
int agp_rebind_memory(void)
{
struct agp_memory *curr;
int ret_val = 0;
spin_lock(&agp_bridge->mapped_lock);
list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
ret_val = curr->bridge->driver->insert_memory(curr,
curr->pg_start,
curr->type);
if (ret_val != 0)
break;
}
spin_unlock(&agp_bridge->mapped_lock);
return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
/* End - Routines for handling swapping of agp_memory into the GATT */
...
@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
bridge->driver->configure();
ret_val = agp_rebind_memory();
if (ret_val != 0)
return ret_val;
return 0;
}
#endif
...
@@ -75,6 +75,8 @@
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
/* Note: PGETBL_CTL2 has a different offset on G33. */
#define I965_PGETBL_CTL2 0x20c4
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
@@ -82,9 +84,15 @@
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_PGETBL_SIZE_MASK (3 << 8)
#define G33_PGETBL_SIZE_1M (1 << 8)
#define G33_PGETBL_SIZE_2M (2 << 8)
#define G33_GMCH_SIZE_MASK (3 << 8)
#define G33_GMCH_SIZE_1M (1 << 8)
#define G33_GMCH_SIZE_2M (2 << 8)
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
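The new G4x GMCH size fields above are decoded from the chipset's GMCH control word rather than hardcoded. A minimal sketch of such a decode, assuming the caller has already read the GMCH control register from PCI config space (the function name and input are illustrative, not part of this commit):

/* Illustrative sketch: map the G4x GMCH size field to a GTT size in
 * bytes. How gmch_ctrl is obtained is an assumption, not shown here. */
static unsigned int example_g4x_gtt_size(u16 gmch_ctrl)
{
	switch (gmch_ctrl & G4x_GMCH_SIZE_MASK) {
	case G4x_GMCH_SIZE_1M:
	case G4x_GMCH_SIZE_VT_1M:
		return 1024 * 1024;
	case G4x_GMCH_SIZE_VT_1_5M:
		return (1024 + 512) * 1024;
	case G4x_GMCH_SIZE_2M:
	case G4x_GMCH_SIZE_VT_2M:
		return 2 * 1024 * 1024;
	default:
		return 0;	/* unknown encoding */
	}
}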
...
This diff is collapsed.
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
void drm_agp_chipset_flush(struct drm_device *dev)
{
agp_flush_chipset(dev->agp->bridge);
}
EXPORT_SYMBOL(drm_agp_chipset_flush);
#endif /* __OS_HAS_AGP */
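With drm_agp_chipset_flush() removed, the Intel KMS path flushes through the intel-gtt layer instead. A minimal sketch of the replacement pattern, mirroring i915_gem_restore_gtt_mappings() later in this commit (the wrapper function itself is illustrative):

/* Sketch: flush an object's pages without the generic AGP wrapper */
static void example_flush_object(struct drm_i915_gem_object *obj)
{
	i915_gem_clflush_object(obj);	/* flush CPU caches */
	intel_gtt_chipset_flush();	/* flush chipset write buffers */
}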
@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!crtc->enabled)
return true;
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
/* Store real post-adjustment hardware mode. */
crtc->hwmode = *adjusted_mode;
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc);
/* XXX free adjustedmode */
drm_mode_destroy(dev, adjusted_mode);
/* FIXME: add subpixel order */
done:
if (!ret) {
crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@@ -650,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
old_fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
set->crtc->fb = old_fb;
ret = -EINVAL;
goto fail;
}
@@ -664,9 +676,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
if (ret != 0)
if (ret != 0) {
set->crtc->fb = old_fb;
goto fail;
}
}
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
...
@@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->fix.line_length = fb->pitch;
return;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
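Since drm_fb_helper_fill_fix() now takes the drm_framebuffer directly instead of a raw pitch and depth, a driver's fbdev setup can hand its framebuffer straight through. A minimal usage sketch together with drm_fb_helper_fill_var() from the same file (the wrapper function is illustrative):

/* Sketch: typical fbdev parameter setup with the new signature */
static void example_fbdev_setup(struct fb_info *info,
				struct drm_fb_helper *helper,
				uint32_t width, uint32_t height)
{
	drm_fb_helper_fill_fix(info, helper->fb);		/* fixed params from the fb */
	drm_fb_helper_fill_var(info, helper, width, height);	/* variable params */
}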
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
drm_fb_helper_fill_fix(info, fb_helper->fb);
}
mutex_unlock(&dev->mode_config.mutex);
@@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb) {
info->var.pixclock = 0;
drm_fb_helper_fill_fix(info, fb_helper->fb);
if (register_framebuffer(info) < 0) {
return -EINVAL;
}
@@ -979,24 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->fix.line_length = pitch;
return;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@@ -1005,6 +1008,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1534,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
static int __init drm_fb_helper_modinit(void)
{
const char *name = "fbcon";
struct module *fbcon;
mutex_lock(&module_mutex);
fbcon = find_module(name);
mutex_unlock(&module_mutex);
if (!fbcon)
request_module_nowait(name);
return 0;
}
module_init(drm_fb_helper_modinit);
#endif
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
...
This diff is collapsed.
@@ -392,9 +392,35 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
* Initialize lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
* hole. This version is for range-restricted scans.
*
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
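The scan API works like a transaction: initialize, feed candidate nodes until a hole is reported, then remove every fed node again before actually evicting the hits. A minimal sketch of that pattern, modeled on the i915 eviction rework later in this commit (the candidate array is illustrative; real callers walk LRU lists):

/* Sketch: range-restricted eviction scan. nodes[] stands in for the
 * caller's LRU-ordered candidates. */
static bool example_scan(struct drm_mm *mm, struct drm_mm_node *nodes[],
			 int count, unsigned long size, unsigned alignment,
			 unsigned long start, unsigned long end)
{
	int i, added = 0;
	bool found = false;

	drm_mm_init_scan_with_range(mm, size, alignment, start, end);

	/* feed candidates until a large enough hole is found */
	while (added < count && !found)
		found = drm_mm_scan_add_block(nodes[added++]);

	/* unwind: every added block must be removed again, in reverse;
	 * a nonzero return marks a block inside the hole, i.e. one the
	 * caller must actually evict */
	for (i = added - 1; i >= 0; i--)
		drm_mm_scan_remove_block(nodes[i]);

	return found;
}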
/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
if (check_free_hole(node->start, node->start + node->size,
if (mm->scan_check_range) {
adj_start = node->start < mm->scan_start ?
mm->scan_start : node->start;
adj_end = node->start + node->size > mm->scan_end ?
mm->scan_end : node->start + node->size;
} else {
adj_start = node->start;
adj_end = node->start + node->size;
}
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
...
@@ -40,12 +40,22 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
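Both new knobs follow the existing debug parameter: module_param_named() with mode 0600, so they can be set at load time or adjusted by root through sysfs. Illustrative examples (values are arbitrary):

    drm.vblankoffdelay=3000 drm.timestamp_precision_usec=10    (kernel command line, drm built in)
    echo 10 > /sys/module/drm/parameters/timestamp_precision_usec    (at runtime)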
struct idr drm_minors_idr;
...
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
...
This diff is collapsed.
This diff is collapsed.
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
@@ -244,10 +245,34 @@ void intel_detect_pch (struct drm_device *dev)
}
}
void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
udelay(10);
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(FORCEWAKE);
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
udelay(10);
}
void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
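On Gen6 the GT register block can power down, so accesses must be bracketed by a forcewake reference. A minimal sketch of the intended usage of the two helpers above (the register argument is arbitrary; whether the driver hides this inside its read macros is not shown in this hunk):

/* Sketch: read a GT register on Gen6 under an explicit forcewake bracket */
static u32 example_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	__gen6_force_wake_get(dev_priv);	/* wake GT, spin on FORCEWAKE_ACK */
	val = I915_READ(reg);
	__gen6_force_wake_put(dev_priv);	/* allow GT to sleep again */

	return val;
}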
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
@@ -284,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
drm_kms_helper_poll_disable(dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(dev);
if (error)
@@ -304,6 +331,12 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -332,6 +365,9 @@ int i915_resume(struct drm_device *dev)
{
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -405,6 +441,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
}
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
@@ -431,7 +475,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
mutex_lock(&dev->struct_mutex);
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
i915_gem_reset(dev);
@@ -439,6 +484,9 @@
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
case 6:
ret = gen6_do_reset(dev, flags);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
@@ -472,9 +520,14 @@
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
ring->init(dev, ring);
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -523,6 +576,9 @@ static int i915_pm_suspend(struct device *dev)
return -ENODEV;
}
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
error = i915_drm_freeze(drm_dev);
if (error)
return error;
@@ -606,6 +662,8 @@ static struct drm_driver driver = {
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.get_vblank_timestamp = i915_get_vblank_timestamp,
.get_scanout_position = i915_get_crtc_scanoutpos,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +719,6 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
@@ -684,17 +740,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
if (!(driver.driver_features & DRIVER_MODESET)) {
driver.suspend = i915_suspend;
driver.resume = i915_resume;
}
return drm_init(&driver);
}
static void __exit i915_exit(void)
{
i915_gem_shrinker_exit();
drm_exit(&driver);
}
...
This diff is collapsed.
This diff is collapsed.
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj_priv->pages[page],
i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
obj->size);
gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj_priv->gtt_offset +
(int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
...
@@ -32,28 +32,36 @@
#include "i915_drm.h"
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
struct list_head *unwind)
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj_priv->evict_list, unwind);
drm_gem_object_reference(&obj_priv->base);
return drm_mm_scan_add_block(obj_priv->gtt_space);
list_add(&obj->exec_list, unwind);
drm_gem_object_reference(&obj->base);
return drm_mm_scan_add_block(obj->gtt_space);
}
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
if (mappable) {
if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
min_size, alignment, 0,
dev_priv->mm.gtt_mappable_end,
0))
return 0;
} else {
if (drm_mm_search_free(&dev_priv->mm.gtt_space,
min_size, alignment, 0))
return 0;
}
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
alignment, 0,
dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj_priv, &unwind_list))
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count)
if (obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
if (obj_priv->pin_count)
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (obj->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
if (! obj_priv->base.write_domain || obj_priv->pin_count)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (! obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj_priv, &unwind_list))
if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
list_for_each_entry(obj_priv, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
list_for_each_entry(obj, &unwind_list, exec_list) {
ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
drm_gem_object_unreference(&obj_priv->base);
drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +144,33 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
obj_priv = list_first_entry(&unwind_list,
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
evict_list);
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
list_move(&obj_priv->evict_list, &eviction_list);
exec_list);
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
continue;
}
list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
obj_priv = list_first_entry(&eviction_list,
obj = list_first_entry(&eviction_list,
struct drm_i915_gem_object,
evict_list);
exec_list);
if (ret == 0)
ret = i915_gem_object_unbind(&obj_priv->base);
list_del(&obj_priv->evict_list);
drm_gem_object_unreference(&obj_priv->base);
ret = i915_gem_object_unbind(obj);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
return ret;
}
int
i915_gem_evict_everything(struct drm_device *dev)
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
@@ -176,35 +189,21 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
ret = i915_gem_evict_inactive(dev);
if (ret)
return ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
BUG_ON(!lists_empty);
return 0;
return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
while (!list_empty(&dev_priv->mm.inactive_list)) {
struct drm_gem_object *obj;
int ret;
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
mm_list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
DRM_ERROR("Error unbinding object: %d\n", ret);
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
int ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
...
This diff is collapsed.
/*
* Copyright © 2010 Daniel Vetter
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start
>> PAGE_SHIFT,
obj->agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start
>> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
obj->agp_type);
}
intel_gtt_chipset_flush();
}
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev_priv->mm.gtt->needs_dmar) {
ret = intel_gtt_map_memory(obj->pages,
obj->base.size >> PAGE_SHIFT,
&obj->sg_list,
&obj->num_sg);
if (ret != 0)
return ret;
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start >> PAGE_SHIFT,
obj->agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
obj->agp_type);
return 0;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mm.gtt->needs_dmar) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
obj->num_sg = 0;
}
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* Check pitch constraints for all chips & tiling formats */
bool
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->gtt_space == NULL)
return true;
u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
if (INTEL_INFO(dev)->gen >= 4)
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
if (obj_priv->gtt_offset & (obj->size - 1))
return false;
if (IS_GEN3(dev)) {
if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024;
else
size = 512*1024;
while (size < obj->base.size)
size <<= 1;
if (obj->gtt_space->size != size)
return false;
if (obj->gtt_offset & (size - 1))
return false;
return true;
}
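The rounding above means, for example, that a 1.25MB object on gen3 needs a 2MB GTT allocation aligned to 2MB before it can be fenced. A short standalone illustration of that size computation (the function and its inputs are illustrative, transcribed from the logic above):

/* Worked example of the fence-size rounding: the smallest power-of-two
 * region, starting at 1MB (gen3) or 512KB (gen2), that holds the object */
static u32 example_fence_size(int gen, u32 obj_size)
{
	u32 size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;

	return size;	/* e.g. gen3, 1.25MB object -> 2MB fence region */
}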
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
obj_priv = to_intel_bo(obj);
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
drm_gem_object_unreference_unlocked(obj);
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
if (obj_priv->pin_count) {
drm_gem_object_unreference_unlocked(obj);
if (obj->pin_count) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode != obj_priv->tiling_mode ||
args->stride != obj_priv->stride) {
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
ret = i915_gem_object_unbind(obj);
else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
ret = i915_gem_object_put_fence_reg(obj, true);
else
i915_gem_release_mmap(obj);
if (ret != 0) {
args->tiling_mode = obj_priv->tiling_mode;
args->stride = obj_priv->stride;
goto err;
}
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
obj->tiling_changed = true;
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
}
err:
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return ret;
return 0;
}
/**
@@ -359,22 +364,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
struct drm_i915_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj_priv->tiling_mode;
switch (obj_priv->tiling_mode) {
args->tiling_mode = obj->tiling_mode;
switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
drm_gem_object_unreference(obj);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
}
void
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size >> PAGE_SHIFT;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
if (obj_priv->bit_17 == NULL)
if (obj->bit_17 == NULL)
return;
for (i = 0; i < page_count; i++) {
char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj_priv->bit_17) != 0)) {
i915_gem_swizzle_page(obj_priv->pages[i]);
set_page_dirty(obj_priv->pages[i]);
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(obj->pages[i]);
set_page_dirty(obj->pages[i]);
}
}
}
void
i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size >> PAGE_SHIFT;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
if (obj_priv->bit_17 == NULL) {
obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
if (obj_priv->bit_17 == NULL) {
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
}
for (i = 0; i < page_count; i++) {
if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
__set_bit(i, obj_priv->bit_17);
if (page_to_phys(obj->pages[i]) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj_priv->bit_17);
__clear_bit(i, obj->bit_17);
}
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -1442,8 +1442,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
intel_wait_for_vblank(intel_dp->base.base.dev,
intel_crtc->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
...
This diff is collapsed.
This diff is collapsed.
@@ -85,7 +85,8 @@ static u32 get_reserved(struct intel_gpio *gpio)
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
reserved = I915_READ_NOTRACE(gpio->reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
@@ -96,9 +97,9 @@ static int get_clock(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
I915_WRITE(gpio->reg, reserved);
return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
I915_WRITE_NOTRACE(gpio->reg, reserved);
return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
I915_WRITE(gpio->reg, reserved);
return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
I915_WRITE_NOTRACE(gpio->reg, reserved);
return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
I915_WRITE(gpio->reg, reserved | clock_bits);
I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
POSTING_READ(gpio->reg);
}
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
I915_WRITE(gpio->reg, reserved | data_bits);
I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
POSTING_READ(gpio->reg);
}
... (remaining file diffs collapsed)