/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

#define RADEON_WAIT_IDLE_TIMEOUT 200

/*
 * Top-level DRM interrupt handler: recovers the drm_device from the
 * opaque handler argument and hands processing off to the ASIC-specific
 * interrupt processing routine.
 */
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;

	return radeon_irq_process(dev->dev_private);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 * Runs from the hotplug_work workqueue item queued by the IRQ path,
 * re-probes every connector and then notifies userspace via a uevent.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head)
			radeon_connector_hotplug(connector);
	}
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
67
	unsigned long irqflags;
68 69
	unsigned i;

70
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
71
	/* Disable *all* interrupts */
72
	for (i = 0; i < RADEON_NUM_RINGS; i++)
73
		atomic_set(&rdev->irq.ring_int[i], 0);
74
	rdev->irq.gui_idle = false;
75
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
76
		rdev->irq.hpd[i] = false;
77 78
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
79
		atomic_set(&rdev->irq.pflip[i], 0);
80
		rdev->irq.afmt[i] = false;
81
	}
82
	radeon_irq_set(rdev);
83
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
84 85 86 87 88 89 90 91 92 93 94 95 96
	/* Clear bits */
	radeon_irq_process(rdev);
}

/*
 * DRM irq postinstall callback.  Tells the DRM core the width of the
 * hardware vblank frame counter so it can handle counter wrap-around.
 * Always returns 0 (success).
 */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
	/* hardware frame counter is 21 bits wide */
	dev->max_vblank_count = 0x001fffff;
	return 0;
}

void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
97
	unsigned long irqflags;
98 99 100 101 102
	unsigned i;

	if (rdev == NULL) {
		return;
	}
103
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
104
	/* Disable *all* interrupts */
105
	for (i = 0; i < RADEON_NUM_RINGS; i++)
106
		atomic_set(&rdev->irq.ring_int[i], 0);
107
	rdev->irq.gui_idle = false;
108
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
109
		rdev->irq.hpd[i] = false;
110 111
	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
112
		atomic_set(&rdev->irq.pflip[i], 0);
113
		rdev->irq.afmt[i] = false;
114
	}
115
	radeon_irq_set(rdev);
116
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
117 118
}

static bool radeon_msi_ok(struct radeon_device *rdev)
{
	/* RV370/RV380 was first asic with MSI support */
	if (rdev->family < CHIP_RV380)
		return false;

	/* MSIs don't work on AGP */
	if (rdev->flags & RADEON_IS_AGP)
		return false;

129 130 131 132 133 134
	/* force MSI on */
	if (radeon_msi == 1)
		return true;
	else if (radeon_msi == 0)
		return false;

135 136 137 138 139 140 141
	/* Quirks */
	/* HP RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x103c) &&
	    (rdev->pdev->subsystem_device == 0x30c2))
		return true;

142 143 144 145 146 147
	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fc))
		return true;

148 149 150 151 152 153
	/* Dell RS690 only seems to work with MSIs. */
	if ((rdev->pdev->device == 0x791f) &&
	    (rdev->pdev->subsystem_vendor == 0x1028) &&
	    (rdev->pdev->subsystem_device == 0x01fd))
		return true;

D
Dave Airlie 已提交
154 155 156 157 158 159
	/* RV515 seems to have MSI issues where it loses
	 * MSI rearms occasionally. This leads to lockups and freezes.
	 * disable it by default.
	 */
	if (rdev->family == CHIP_RV515)
		return false;
160 161 162 163 164 165 166 167 168 169 170
	if (rdev->flags & RADEON_IS_IGP) {
		/* APUs work fine with MSIs */
		if (rdev->family >= CHIP_PALM)
			return true;
		/* lots of IGPs have problems with MSIs */
		return false;
	}

	return true;
}

int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

175
	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
176
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
177

178
	spin_lock_init(&rdev->irq.lock);
179
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
180 181 182
	if (r) {
		return r;
	}
A
Alex Deucher 已提交
183 184
	/* enable msi */
	rdev->msi_enabled = 0;
185 186

	if (radeon_msi_ok(rdev)) {
A
Alex Deucher 已提交
187
		int ret = pci_enable_msi(rdev->pdev);
188
		if (!ret) {
A
Alex Deucher 已提交
189
			rdev->msi_enabled = 1;
190
			dev_info(rdev->dev, "radeon: using MSI.\n");
191
		}
A
Alex Deucher 已提交
192
	}
193
	rdev->irq.installed = true;
194 195 196 197 198
	r = drm_irq_install(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
199 200 201 202 203 204
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}

void radeon_irq_kms_fini(struct radeon_device *rdev)
{
205
	drm_vblank_cleanup(rdev->ddev);
206 207
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
208
		rdev->irq.installed = false;
A
Alex Deucher 已提交
209 210
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
211
	}
212
	flush_work_sync(&rdev->hotplug_work);
213
}

void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
216 217 218
{
	unsigned long irqflags;

219 220 221 222 223
	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
224
		radeon_irq_set(rdev);
225
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
226 227 228
	}
}

void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
230 231 232
{
	unsigned long irqflags;

233 234 235 236 237
	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
238
		radeon_irq_set(rdev);
239
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
240 241 242
	}
}

/*
 * Take a reference on the pageflip interrupt for @crtc; the hardware
 * interrupt is only enabled on the 0 -> 1 refcount transition.
 * Out-of-range crtc indices are silently ignored.
 */
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}

/*
 * Drop a reference on the pageflip interrupt for @crtc; the hardware
 * interrupt is only disabled on the final 1 -> 0 refcount transition.
 * Out-of-range crtc indices are silently ignored.
 */
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (!rdev->ddev->irq_enabled)
		return;

	if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}

/*
 * Enable the HDMI audio (afmt) interrupt for @block and reprogram the
 * interrupt registers under the irq lock.  No bounds check is done on
 * @block; callers must pass a valid afmt block index.
 */
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);

}

/*
 * Disable the HDMI audio (afmt) interrupt for @block and reprogram the
 * interrupt registers under the irq lock.  Counterpart of
 * radeon_irq_kms_enable_afmt().
 */
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.afmt[block] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/*
 * Wait for the GPU graphics engine to go idle.  Arms the gui_idle
 * interrupt, waits on idle_queue (up to RADEON_WAIT_IDLE_TIMEOUT ms)
 * for radeon_gui_idle() to report idle, then disarms the interrupt.
 * Returns the wait_event_timeout() result: 0 on timeout, otherwise the
 * remaining jiffies (nonzero means the engine went idle in time).
 */
int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
{
	unsigned long irqflags;
	int r;

	/* arm the gui_idle interrupt so the wait below can be woken */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);

	r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
			       msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));

	/* disarm again regardless of whether the wait timed out */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}