Commit d85a1302 authored by Anthony Liguori

Merge remote-tracking branch 'kwolf/for-anthony' into staging

@@ -47,7 +47,12 @@ struct BDRVCURLState;
typedef struct CURLAIOCB {
BlockDriverAIOCB common;
QEMUBH *bh;
QEMUIOVector *qiov;
int64_t sector_num;
int nb_sectors;
size_t start;
size_t end;
} CURLAIOCB;
@@ -76,6 +81,7 @@ typedef struct BDRVCURLState {
static void curl_clean_state(CURLState *s);
static void curl_multi_do(void *arg);
static int curl_aio_flush(void *opaque);
static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
void *s, void *sp)
@@ -83,14 +89,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
switch (action) {
case CURL_POLL_IN:
qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, NULL, s);
qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush,
NULL, s);
break;
case CURL_POLL_OUT:
qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, NULL, s);
qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush,
NULL, s);
break;
case CURL_POLL_INOUT:
qemu_aio_set_fd_handler(fd, curl_multi_do,
curl_multi_do, NULL, NULL, s);
qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
curl_aio_flush, NULL, s);
break;
case CURL_POLL_REMOVE:
qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL, NULL);
@@ -412,6 +420,21 @@ out_noclean:
return -EINVAL;
}
static int curl_aio_flush(void *opaque)
{
BDRVCURLState *s = opaque;
int i, j;
for (i=0; i < CURL_NUM_STATES; i++) {
for(j=0; j < CURL_NUM_ACB; j++) {
if (s->states[i].acb[j]) {
return 1;
}
}
}
return 0;
}
static void curl_aio_cancel(BlockDriverAIOCB *blockacb)
{
// Do we have to implement canceling? Seems to work without...
@@ -422,43 +445,42 @@ static AIOPool curl_aio_pool = {
.cancel = curl_aio_cancel,
};
static BlockDriverAIOCB *curl_aio_readv(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque)
static void curl_readv_bh_cb(void *p)
{
BDRVCURLState *s = bs->opaque;
CURLAIOCB *acb;
size_t start = sector_num * SECTOR_SIZE;
size_t end;
CURLState *state;
acb = qemu_aio_get(&curl_aio_pool, bs, cb, opaque);
if (!acb)
return NULL;
CURLAIOCB *acb = p;
BDRVCURLState *s = acb->common.bs->opaque;
acb->qiov = qiov;
qemu_bh_delete(acb->bh);
acb->bh = NULL;
size_t start = acb->sector_num * SECTOR_SIZE;
size_t end;
// In case we have the requested data already (e.g. read-ahead),
// we can just call the callback and be done.
switch (curl_find_buf(s, start, nb_sectors * SECTOR_SIZE, acb)) {
switch (curl_find_buf(s, start, acb->nb_sectors * SECTOR_SIZE, acb)) {
case FIND_RET_OK:
qemu_aio_release(acb);
// fall through
case FIND_RET_WAIT:
return &acb->common;
return;
default:
break;
}
// No cache found, so let's start a new request
state = curl_init_state(s);
if (!state)
return NULL;
if (!state) {
acb->common.cb(acb->common.opaque, -EIO);
qemu_aio_release(acb);
return;
}
acb->start = 0;
acb->end = (nb_sectors * SECTOR_SIZE);
acb->end = (acb->nb_sectors * SECTOR_SIZE);
state->buf_off = 0;
if (state->orig_buf)
@@ -471,12 +493,38 @@ static BlockDriverAIOCB *curl_aio_readv(BlockDriverState *bs,
snprintf(state->range, 127, "%zd-%zd", start, end);
DPRINTF("CURL (AIO): Reading %d at %zd (%s)\n",
(nb_sectors * SECTOR_SIZE), start, state->range);
(acb->nb_sectors * SECTOR_SIZE), start, state->range);
curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range);
curl_multi_add_handle(s->multi, state->curl);
curl_multi_do(s);
}
static BlockDriverAIOCB *curl_aio_readv(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque)
{
CURLAIOCB *acb;
acb = qemu_aio_get(&curl_aio_pool, bs, cb, opaque);
if (!acb) {
return NULL;
}
acb->qiov = qiov;
acb->sector_num = sector_num;
acb->nb_sectors = nb_sectors;
acb->bh = qemu_bh_new(curl_readv_bh_cb, acb);
if (!acb->bh) {
DPRINTF("CURL: qemu_bh_new failed\n");
return NULL;
}
qemu_bh_schedule(acb->bh);
return &acb->common;
}
......
@@ -370,6 +370,43 @@ static MemoryRegionOps ahci_mem_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t ahci_idp_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
AHCIState *s = opaque;
if (addr == s->idp_offset) {
/* index register */
return s->idp_index;
} else if (addr == s->idp_offset + 4) {
/* data register - do memory read at location selected by index */
return ahci_mem_read(opaque, s->idp_index, size);
} else {
return 0;
}
}
static void ahci_idp_write(void *opaque, target_phys_addr_t addr,
uint64_t val, unsigned size)
{
AHCIState *s = opaque;
if (addr == s->idp_offset) {
/* index register - mask off reserved bits */
s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
} else if (addr == s->idp_offset + 4) {
/* data register - do memory write at location selected by index */
ahci_mem_write(opaque, s->idp_index, val, size);
}
}
static MemoryRegionOps ahci_idp_ops = {
.read = ahci_idp_read,
.write = ahci_idp_write,
.endianness = DEVICE_LITTLE_ENDIAN,
};
static void ahci_reg_init(AHCIState *s)
{
int i;
@@ -1130,7 +1167,9 @@ void ahci_init(AHCIState *s, DeviceState *qdev, int ports)
s->dev = g_malloc0(sizeof(AHCIDevice) * ports);
ahci_reg_init(s);
/* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
memory_region_init_io(&s->mem, &ahci_mem_ops, s, "ahci", 0x1000);
memory_region_init_io(&s->mem, &ahci_mem_ops, s, "ahci", AHCI_MEM_BAR_SIZE);
memory_region_init_io(&s->idp, &ahci_idp_ops, s, "ahci-idp", 32);
irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
for (i = 0; i < s->ports; i++) {
@@ -1150,6 +1189,7 @@ void ahci_init(AHCIState *s, DeviceState *qdev, int ports)
void ahci_uninit(AHCIState *s)
{
memory_region_destroy(&s->mem);
memory_region_destroy(&s->idp);
g_free(s->dev);
}
......
@@ -24,7 +24,7 @@
#ifndef HW_IDE_AHCI_H
#define HW_IDE_AHCI_H
#define AHCI_PCI_BAR 5
#define AHCI_MEM_BAR_SIZE 0x1000
#define AHCI_MAX_PORTS 32
#define AHCI_MAX_SG 168 /* hardware max is 64K */
#define AHCI_DMA_BOUNDARY 0xffffffff
@@ -212,6 +212,10 @@
#define RES_FIS_SDBFIS 0x58
#define RES_FIS_UFIS 0x60
#define SATA_CAP_SIZE 0x8
#define SATA_CAP_REV 0x2
#define SATA_CAP_BAR 0x4
typedef struct AHCIControlRegs {
uint32_t cap;
uint32_t ghc;
@@ -290,6 +294,9 @@ typedef struct AHCIState {
AHCIDevice *dev;
AHCIControlRegs control_regs;
MemoryRegion mem;
MemoryRegion idp; /* Index-Data Pair I/O port space */
unsigned idp_offset; /* Offset of index in I/O port space */
uint32_t idp_index; /* Current IDP index */
int ports;
qemu_irq irq;
} AHCIState;
......
@@ -71,6 +71,14 @@
#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>
#define ICH9_SATA_CAP_OFFSET 0xA8
#define ICH9_IDP_BAR 4
#define ICH9_MEM_BAR 5
#define ICH9_IDP_INDEX 0x10
#define ICH9_IDP_INDEX_LOG2 0x04
static const VMStateDescription vmstate_ahci = {
.name = "ahci",
.unmigratable = 1,
@@ -79,6 +87,8 @@ static const VMStateDescription vmstate_ahci = {
static int pci_ich9_ahci_init(PCIDevice *dev)
{
struct AHCIPCIState *d;
int sata_cap_offset;
uint8_t *sata_cap;
d = DO_UPCAST(struct AHCIPCIState, card, dev);
ahci_init(&d->ahci, &dev->qdev, 6);
@@ -97,7 +107,22 @@ static int pci_ich9_ahci_init(PCIDevice *dev)
msi_init(dev, 0x50, 1, true, false);
d->ahci.irq = d->card.irq[0];
pci_register_bar(&d->card, 5, 0, &d->ahci.mem);
pci_register_bar(&d->card, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
&d->ahci.idp);
pci_register_bar(&d->card, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
&d->ahci.mem);
sata_cap_offset = pci_add_capability(&d->card, PCI_CAP_ID_SATA,
ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE);
if (sata_cap_offset < 0) {
return sata_cap_offset;
}
sata_cap = d->card.config + sata_cap_offset;
pci_set_word(sata_cap + SATA_CAP_REV, 0x10);
pci_set_long(sata_cap + SATA_CAP_BAR,
(ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4));
d->ahci.idp_offset = ICH9_IDP_INDEX;
return 0;
}
......
@@ -211,6 +211,7 @@
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* Serial ATA */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
......
@@ -439,7 +439,7 @@ int nbd_client(int fd)
return ret;
}
#else
int nbd_init(int fd, int csock, off_t size, size_t blocksize)
int nbd_init(int fd, int csock, uint32_t flags, off_t size, size_t blocksize)
{
errno = ENOTSUP;
return -1;
......