/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);

static const struct nvt_chip nvt_chips[] = {
	{ "w83667hg", NVT_W83667HG },
	{ "NCT6775F", NVT_6775F },
	{ "NCT6776F", NVT_6776F },
	{ "NCT6779D", NVT_6779D },
};

static inline bool is_w83667hg(struct nvt_dev *nvt)
{
	return nvt->chip_ver == NVT_W83667HG;
}

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;
	nvt_cr_write(nvt, tmp, reg);
}

/* clear config register bit without changing other bits */
static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) & ~val;
	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
	if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
		return -EBUSY;

	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);

	return 0;
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);

	release_region(nvt->cr_efir, 2);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
}

/* enter EFM mode, select and enable the logical device, then exit EFM */
static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* enter EFM mode, select and disable the logical device, then exit EFM */
static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, ldev);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
	nvt_efm_disable(nvt);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_addr + offset);
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	return inb(nvt->cir_wake_addr + offset);
}

/* don't override io address if one is set already */
static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
{
	unsigned long old_addr;

	old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
	old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);

	if (old_addr)
		*ioaddr = old_addr;
	else {
		nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
		nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
	}
}

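/*
 * Show the wakeup IR sequence currently stored in the CIR WAKE FIFO, as
 * space-separated pulse/space durations in microseconds.
 */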
static ssize_t wakeup_data_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	int fifo_len, duration;
	unsigned long flags;
	ssize_t buf_len = 0;
	int i;

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);

	/* go to first element to be read */
	while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);

	for (i = 0; i < fifo_len; i++) {
		duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
		duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
				    "%d ", duration);
	}
	buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return buf_len;
}

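/*
 * Store a wakeup IR sequence: space-separated pulse and space durations in
 * microseconds, e.g. "500 500 500 500". Each value is rounded to the 50us
 * sample period, and even-indexed entries are flagged as pulses.
 */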
static ssize_t wakeup_data_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	struct rc_dev *rc_dev = to_rc_dev(dev);
	struct nvt_dev *nvt = rc_dev->priv;
	unsigned long flags;
	u8 tolerance, config, wake_buf[WAKEUP_MAX_SIZE];
	char **argv;
	int i, count;
	unsigned int val;
	ssize_t ret;

	argv = argv_split(GFP_KERNEL, buf, &count);
	if (!argv)
		return -ENOMEM;
	if (!count || count > WAKEUP_MAX_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = kstrtouint(argv[i], 10, &val);
		if (ret)
			goto out;
		val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
		if (!val || val > 0x7f) {
			ret = -EINVAL;
			goto out;
		}
		wake_buf[i] = val;
		/* sequence must start with a pulse */
		if (i % 2 == 0)
			wake_buf[i] |= BUF_PULSE_BIT;
	}

	/* hardcode the tolerance to 10% */
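	/* e.g. count = 8 gives DIV_ROUND_UP(8, 10) = 1 */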
	tolerance = DIV_ROUND_UP(count, 10);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	nvt_clear_cir_wake_fifo(nvt);
	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
	nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* enable writes to wake fifo */
	nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
			       CIR_WAKE_IRCON);

	for (i = 0; i < count; i++)
		nvt_cir_wake_reg_write(nvt, wake_buf[i], CIR_WAKE_WR_FIFO_DATA);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	ret = len;
out:
	argv_free(argv);
	return ret;
}
static DEVICE_ATTR_RW(wakeup_data);

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE :   0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM:   0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN:      0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON:   0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE :   0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM:   0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON:          0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS:          0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN:           0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP:  0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL:   0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT:     0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH:           0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL:           0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON:        0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS:        0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA:   0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY:   0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE:    0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM:          0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvt_chips); i++)
		if ((id & SIO_ID_MASK) == nvt_chips[i].chip_ver) {
			nvt->chip_ver = nvt_chips[i].chip_ver;
			return nvt_chips[i].name;
		}

	return NULL;
}


/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	const char *chip_name;
	int chip_id;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
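	/*
	 * A read of 0xff means nothing answered at the current config ports,
	 * so retry probing through the alternate EFIR/EFDR pair.
	 */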
	if (nvt->chip_major == 0xff) {
		nvt_efm_disable(nvt);
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}
	nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	nvt_efm_disable(nvt);

	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
	if (chip_id == NVT_INVALID) {
		dev_err(&nvt->pdev->dev,
			"No device found on either EFM port\n");
		return -ENODEV;
	}

	chip_name = nvt_find_chip(nvt, chip_id);

	/* warn, but still let the driver load, if we don't know this chip */
	if (!chip_name)
		dev_warn(&nvt->pdev->dev,
			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
			 nvt->chip_major, nvt->chip_minor);
	else
		dev_info(&nvt->pdev->dev,
			 "found %s or compatible: chip id: 0x%02x 0x%02x",
			 chip_name, nvt->chip_major, nvt->chip_minor);

	return 0;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (is_w83667hg(nvt)) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	nvt_set_ioaddr(nvt, &nvt->cir_addr);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device and enable it */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
		nvt->cir_wake_addr);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val, config;

	config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);

	/* clearing wake fifo works in learning mode only */
	nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
			       CIR_WAKE_IRCON);

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);

	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt,
			  CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable interrupts */
	nvt_set_cir_iren(nvt);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/*
	 * Disable RX, set specific carrier on = low, off = high,
	 * and sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);

	/* enable the CIR WAKE logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	unsigned long flags;

	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		dev_notice(&nvt->pdev->dev,
			   "Unable to determine carrier! (c:%u, d:%u)",
			   count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif
/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
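 * (e.g. a 38kHz carrier gives CC = 3000000 / 38000 - 1 = 77, i.e. 0x4d)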
 */
static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct nvt_dev *nvt = dev->priv;
	u16 val;

	if (carrier == 0)
		return -EINVAL;

	nvt_cir_reg_write(nvt, 1, CIR_CP);
	val = 3000000 / (carrier) - 1;
	nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

	nvt_dbg("cp: 0x%x cc: 0x%x\n",
		nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

	return 0;
}

/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem with this function is that the user space data may be
 * larger than the driver's data buffer. So nvt_tx_ir() only copies up to
 * TX_BUF_LEN bytes into buf, and tracks how much has been sent so far in
 * cur_buf_num. The driver's buffer may also hold more than TXFCONT (0xff)
 * bytes, so the interrupt handler has to keep TXFCONT at 0xff until
 * buf_count drops below 0xff.
 */
static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->tx.lock, flags);

	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
	nvt->tx.buf_count = (ret * sizeof(unsigned));

	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts, save TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
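 * For example, a byte of 0x8a is a pulse lasting 10 sample periods (500us at
 * the default 50us period) and 0x0a is a space of the same length.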
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
					  * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);

		/*
		 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
		 * indicates end of IR signal, but new data incoming. In both
		 * cases, it means we're ready to call ir_raw_event_handle
		 */
		if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
			nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
			ir_raw_event_handle(nvt->rdev);
		}
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}

static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	dev_warn(&nvt->pdev->dev, "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	u8 fifocount, val;
	unsigned int b_idx;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	b_idx = nvt->pkts;

	/* This should never happen, but let's check anyway... */
	if (b_idx + fifocount > RX_BUF_LEN) {
		nvt_process_rx_ir_data(nvt);
		b_idx = 0;
	}

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++) {
		val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
		nvt->buf[b_idx + i] = val;
	}

	nvt->pkts += fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_dbg("IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR	? " RDR"	: "",
		status & CIR_IRSTS_RTR	? " RTR"	: "",
		status & CIR_IRSTS_PE	? " PE"		: "",
		status & CIR_IRSTS_RFO	? " RFO"	: "",
		status & CIR_IRSTS_TE	? " TE"		: "",
		status & CIR_IRSTS_TTR	? " TTR"	: "",
		status & CIR_IRSTS_TFU	? " TFU"	: "",
		status & CIR_IRSTS_GH	? " GH"		: "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
	unsigned long flags;
	u8 tx_state;

	spin_lock_irqsave(&nvt->tx.lock, flags);
	tx_state = nvt->tx.tx_state;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	return tx_state == ST_TX_NONE;
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren;
	unsigned long flags;

	nvt_dbg_verbose("%s firing", __func__);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name      - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE  - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE  - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH  - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* IRQ may be shared with CIR WAKE, therefore check for each
	 * status bit whether the related interrupt source is enabled
	 */
	if (!(status & iren)) {
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		return IRQ_NONE;
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RFO)
		nvt_handle_rx_fifo_overrun(nvt);

	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) {
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		spin_lock_irqsave(&nvt->tx.lock, flags);

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}

		spin_unlock_irqrestore(&nvt->tx.lock, flags);

	}

	if (status & CIR_IRSTS_TFU) {
		spin_lock_irqsave(&nvt->tx.lock, flags);
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_HANDLED;
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* disable the CIR logical device */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}

static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* set function enable flags */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* enable the CIR logical device */
	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;

	nvt_disable_cir(nvt);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret = -ENOMEM;

	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return ret;

	/* input device for IR remote (and tx) */
	rdev = rc_allocate_device();
	if (!rdev)
		goto exit_free_dev_rdev;

	ret = -ENODEV;
	/* activate pnp device */
	if (pnp_activate_dev(pdev) < 0) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		goto exit_free_dev_rdev;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		goto exit_free_dev_rdev;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		goto exit_free_dev_rdev;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		goto exit_free_dev_rdev;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq  = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->nvt_lock);
	spin_lock_init(&nvt->tx.lock);

	pnp_set_drvdata(pdev, nvt);
	nvt->pdev = pdev;

	init_waitqueue_head(&nvt->tx.queue);

	ret = nvt_hw_detect(nvt);
	if (ret)
		goto exit_free_dev_rdev;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/*
	 * Initialize CIR & CIR Wake Config Registers
	 * and enable logical devices
	 */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->driver_type = RC_DRIVER_IR_RAW;
	rdev->allowed_protocols = RC_BIT_ALL;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->tx_ir = nvt_tx_ir;
	rdev->s_tx_carrier = nvt_set_tx_carrier;
	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->dev.parent = &pdev->dev;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
	/* tx bits */
	rdev->tx_resolution = XYZ;
#endif
	nvt->rdev = rdev;

	ret = rc_register_device(rdev);
	if (ret)
		goto exit_free_dev_rdev;

	ret = -EBUSY;
	/* now claim resources */
	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		goto exit_unregister_device;

	if (devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
			     IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt))
		goto exit_unregister_device;

	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
		goto exit_unregister_device;

	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
	if (ret)
		goto exit_unregister_device;

	device_init_wakeup(&pdev->dev, true);

	dev_notice(&pdev->dev, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;

exit_unregister_device:
	rc_unregister_device(rdev);
	rdev = NULL;
exit_free_dev_rdev:
	rc_free_device(rdev);

	return ret;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);

	nvt_disable_cir(nvt);

	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);

	rc_unregister_device(nvt->rdev);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* disable cir logical dev */
	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return 0;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id*/
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= nvt_remove,
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_pnp_driver(nvt_driver);