/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

#define RADEON_WAIT_IDLE_TIMEOUT 200

37 38 39 40 41 42 43 44
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;

	return radeon_irq_process(rdev);
}

A
Alex Deucher 已提交
45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head)
			radeon_connector_hotplug(connector);
	}
	/* Just fire off a uevent and let userspace tell us what to do */
61
	drm_helper_hpd_irq_event(dev);
A
Alex Deucher 已提交
62 63
}

64 65 66
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
67
	unsigned long irqflags;
68 69
	unsigned i;

70
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
71
	/* Disable *all* interrupts */
72 73
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		rdev->irq.sw_int[i] = false;
74
	rdev->irq.gui_idle = false;
75
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
76
		rdev->irq.hpd[i] = false;
77 78
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
79
		rdev->irq.pflip[i] = false;
80
		rdev->irq.afmt[i] = false;
81
	}
82
	radeon_irq_set(rdev);
83
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
84 85 86 87 88 89 90
	/* Clear bits */
	radeon_irq_process(rdev);
}

int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
91
	unsigned long irqflags;
92
	unsigned i;
93 94

	dev->max_vblank_count = 0x001fffff;
95
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
96 97
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		rdev->irq.sw_int[i] = true;
98
	radeon_irq_set(rdev);
99
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
100 101 102 103 104 105
	return 0;
}

void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
106
	unsigned long irqflags;
107 108 109 110 111
	unsigned i;

	if (rdev == NULL) {
		return;
	}
112
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
113
	/* Disable *all* interrupts */
114 115
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		rdev->irq.sw_int[i] = false;
116
	rdev->irq.gui_idle = false;
117
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
118
		rdev->irq.hpd[i] = false;
119 120
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
121
		rdev->irq.pflip[i] = false;
122
		rdev->irq.afmt[i] = false;
123
	}
124
	radeon_irq_set(rdev);
125
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
126 127
}

128 129 130 131 132 133 134 135 136 137
static bool radeon_msi_ok(struct radeon_device *rdev)
{
	/* RV370/RV380 was first asic with MSI support */
	if (rdev->family < CHIP_RV380)
		return false;

	/* MSIs don't work on AGP */
	if (rdev->flags & RADEON_IS_AGP)
		return false;

138 139 140 141 142 143
	/* force MSI on */
	if (radeon_msi == 1)
		return true;
	else if (radeon_msi == 0)
		return false;

144 145 146 147 148 149 150
	/* Quirks */
	/* HP RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x103c) &&
	    (rdev->pdev->subsystem_device == 0x30c2))
		return true;

151 152 153 154 155 156
	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fc))
		return true;

157 158 159 160 161 162
	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fd))
		return true;

D
Dave Airlie 已提交
163 164 165 166 167 168
	/* RV515 seems to have MSI issues where it loses
	 * MSI rearms occasionally. This leads to lockups and freezes.
	 * disable it by default.
	 */
	if (rdev->family == CHIP_RV515)
		return false;
169 170 171 172 173 174 175 176 177 178 179
	if (rdev->flags & RADEON_IS_IGP) {
		/* APUs work fine with MSIs */
		if (rdev->family >= CHIP_PALM)
			return true;
		/* lots of IGPs have problems with MSIs */
		return false;
	}

	return true;
}

180 181 182 183
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

184
	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
185
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
186

187
	spin_lock_init(&rdev->irq.lock);
188
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
189 190 191
	if (r) {
		return r;
	}
A
Alex Deucher 已提交
192 193
	/* enable msi */
	rdev->msi_enabled = 0;
194 195

	if (radeon_msi_ok(rdev)) {
A
Alex Deucher 已提交
196
		int ret = pci_enable_msi(rdev->pdev);
197
		if (!ret) {
A
Alex Deucher 已提交
198
			rdev->msi_enabled = 1;
199
			dev_info(rdev->dev, "radeon: using MSI.\n");
200
		}
A
Alex Deucher 已提交
201
	}
202
	rdev->irq.installed = true;
203 204 205 206 207
	r = drm_irq_install(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
208 209 210 211 212 213
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}

void radeon_irq_kms_fini(struct radeon_device *rdev)
{
214
	drm_vblank_cleanup(rdev->ddev);
215 216
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
217
		rdev->irq.installed = false;
A
Alex Deucher 已提交
218 219
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
220
	}
221
	flush_work_sync(&rdev->hotplug_work);
222
}
223

224
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
225 226 227
{
	unsigned long irqflags;

228
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
229 230
	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
		rdev->irq.sw_int[ring] = true;
231 232
		radeon_irq_set(rdev);
	}
233
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
234 235
}

236
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
237 238 239
{
	unsigned long irqflags;

240
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
241 242 243
	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
		rdev->irq.sw_int[ring] = false;
244 245
		radeon_irq_set(rdev);
	}
246
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
247 248
}

249 250 251 252 253 254 255
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

256
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
257 258 259 260
	if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
		rdev->irq.pflip[crtc] = true;
		radeon_irq_set(rdev);
	}
261
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
262 263 264 265 266 267 268 269 270
}

/*
 * Drop a reference on the pageflip interrupt of @crtc, disabling it
 * in hardware when the refcount reaches zero.  Out-of-range crtc
 * indices are silently ignored.
 */
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
	unsigned long flags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	spin_lock_irqsave(&rdev->irq.lock, flags);
	/* catch unbalanced put without a matching get */
	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
	if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
		rdev->irq.pflip[crtc] = false;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.lock, flags);
}

280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);

}

/*
 * Disable the HDMI audio (AFMT) interrupt for @block and program
 * the hardware accordingly.
 */
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->irq.lock, flags);
	rdev->irq.afmt[block] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, flags);
}

void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/*
 * Wait (up to RADEON_WAIT_IDLE_TIMEOUT ms) for the GUI engine to go
 * idle, using the gui_idle interrupt to wake the wait.  Returns the
 * wait_event_timeout() result: 0 on timeout, >0 otherwise.
 */
int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
{
	unsigned long flags;
	int r;

	/* arm the gui-idle interrupt so the wait below can be woken */
	spin_lock_irqsave(&rdev->irq.lock, flags);
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, flags);

	r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
			       msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));

	/* disarm it again */
	spin_lock_irqsave(&rdev->irq.lock, flags);
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, flags);
	return r;
}