提交 9c904a75 编写于 作者: Peter Maydell

Merge remote-tracking branch 'remotes/gkurz/tags/for-upstream' into staging

- transport specific callbacks (for Xen)
- fix crash (2.8 regression)
- 9p functional tests

# gpg: Signature made Tue 03 Jan 2017 17:30:58 GMT
# gpg:                using DSA key 0x02FC3AEB0101DBC2
# gpg: Good signature from "Greg Kurz <groug@kaod.org>"
# gpg:                 aka "Greg Kurz <groug@free.fr>"
# gpg:                 aka "Greg Kurz <gkurz@fr.ibm.com>"
# gpg:                 aka "Greg Kurz <gkurz@linux.vnet.ibm.com>"
# gpg:                 aka "Gregory Kurz (Groug) <groug@free.fr>"
# gpg:                 aka "Gregory Kurz (Cimai Technology) <gkurz@cimai.com>"
# gpg:                 aka "Gregory Kurz (Meiosys Technology) <gkurz@meiosys.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 2BD4 3B44 535E C0A7 9894  DBA2 02FC 3AEB 0101 DBC2

* remotes/gkurz/tags/for-upstream:
  tests: virtio-9p: ".." cannot be used to walk out of the shared directory
  tests: virtio-9p: no slash in path elements during walk
  tests: virtio-9p: add walk operation test
  tests: virtio-9p: add attach operation test
  tests: virtio-9p: add version operation test
  9pfs: fix P9_NOTAG and P9_NOFID macros
  tests: virtio-9p: code refactoring
  tests: virtio-9p: rename PCI configuration test
  9pfs: fix crash when fsdev is missing
  9pfs: introduce init_out/in_iov_from_pdu
  9pfs: call v9fs_init_qiov_from_pdu before v9fs_pack
  9pfs: introduce transport specific callbacks
  9pfs: move pdus to V9fsState
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
......@@ -47,7 +47,7 @@ ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
ret = virtio_pdu_vmarshal(pdu, offset, fmt, ap);
ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
va_end(ap);
return ret;
......@@ -59,7 +59,7 @@ ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
ret = virtio_pdu_vunmarshal(pdu, offset, fmt, ap);
ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
va_end(ap);
return ret;
......@@ -67,7 +67,7 @@ ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
static void pdu_push_and_notify(V9fsPDU *pdu)
{
virtio_9p_push_and_notify(pdu);
pdu->s->transport->push_and_notify(pdu);
}
static int omode_to_uflags(int8_t mode)
......@@ -1633,14 +1633,43 @@ out_nofid:
pdu_complete(pdu, err);
}
/*
 * Create a QEMUIOVector for a sub-region of PDU iovecs
 *
 * @qiov: uninitialized QEMUIOVector
 * @pdu: the PDU whose transport-provided iovecs are wrapped
 * @skip: number of bytes to skip from beginning of PDU
 * @size: number of bytes to include
 * @is_write: true - write, false - read
 *
 * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
 * with qemu_iovec_destroy().
 */
static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
size_t skip, size_t size,
bool is_write)
{
QEMUIOVector elem;
struct iovec *iov;
unsigned int niov;
/* Fetch the raw iovec array from the transport (virtio, Xen, ...). */
if (is_write) {
pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov);
} else {
pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size);
}
/* Wrap the transport iovecs, then copy out the [skip, skip+size) window. */
qemu_iovec_init_external(&elem, iov, niov);
qemu_iovec_init(qiov, niov);
qemu_iovec_concat(qiov, &elem, skip, size);
}
static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
uint64_t off, uint32_t max_count)
{
ssize_t err;
size_t offset = 7;
uint64_t read_count;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
QEMUIOVector qiov_full;
if (fidp->fs.xattr.len < off) {
read_count = 0;
......@@ -1656,9 +1685,11 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
}
offset += err;
err = v9fs_pack(elem->in_sg, elem->in_num, offset,
v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);
err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
((char *)fidp->fs.xattr.value) + off,
read_count);
qemu_iovec_destroy(&qiov_full);
if (err < 0) {
return err;
}
......@@ -1732,32 +1763,6 @@ static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
return count;
}
/*
* Create a QEMUIOVector for a sub-region of PDU iovecs
*
* @qiov: uninitialized QEMUIOVector
* @skip: number of bytes to skip from beginning of PDU
* @size: number of bytes to include
* @is_write: true - write, false - read
*
* The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
* with qemu_iovec_destroy().
*/
static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
size_t skip, size_t size,
bool is_write)
{
QEMUIOVector elem;
struct iovec *iov;
unsigned int niov;
virtio_init_iov_from_pdu(pdu, &iov, &niov, is_write);
qemu_iovec_init_external(&elem, iov, niov);
qemu_iovec_init(qiov, niov);
qemu_iovec_concat(qiov, &elem, skip, size);
}
static void coroutine_fn v9fs_read(void *opaque)
{
int32_t fid;
......@@ -3440,7 +3445,6 @@ void pdu_submit(V9fsPDU *pdu)
/* Returns 0 on success, 1 on failure. */
int v9fs_device_realize_common(V9fsState *s, Error **errp)
{
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
int i, len;
struct stat stat;
FsDriverEntry *fse;
......@@ -3451,9 +3455,9 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
QLIST_INIT(&s->free_list);
QLIST_INIT(&s->active_list);
for (i = 0; i < (MAX_REQ - 1); i++) {
QLIST_INSERT_HEAD(&s->free_list, &v->pdus[i], next);
v->pdus[i].s = s;
v->pdus[i].idx = i;
QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
s->pdus[i].s = s;
s->pdus[i].idx = i;
}
v9fs_path_init(&path);
......@@ -3521,7 +3525,7 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
rc = 0;
out:
if (rc) {
if (s->ops->cleanup && s->ctx.private) {
if (s->ops && s->ops->cleanup && s->ctx.private) {
s->ops->cleanup(&s->ctx);
}
g_free(s->tag);
......
......@@ -99,8 +99,8 @@ enum p9_proto_version {
V9FS_PROTO_2000L = 0x02,
};
#define P9_NOTAG (u16)(~0)
#define P9_NOFID (u32)(~0)
#define P9_NOTAG UINT16_MAX
#define P9_NOFID UINT32_MAX
#define P9_MAXWELEM 16
#define FID_REFERENCED 0x1
......@@ -229,6 +229,8 @@ typedef struct V9fsState
char *tag;
enum p9_proto_version proto_version;
int32_t msize;
V9fsPDU pdus[MAX_REQ];
const struct V9fsTransport *transport;
/*
* lock ensuring atomic path update
* on rename.
......@@ -342,4 +344,24 @@ void pdu_free(V9fsPDU *pdu);
void pdu_submit(V9fsPDU *pdu);
void v9fs_reset(V9fsState *s);
/*
 * Hooks a 9p transport (e.g. virtio) must implement so the generic 9p
 * server code can marshal PDUs and signal completion without knowing
 * the underlying ring/queue mechanics.
 */
struct V9fsTransport {
/* Marshal a reply into the PDU at byte @offset, printf-style @fmt. */
ssize_t (*pdu_vmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
va_list ap);
/* Unmarshal a request from the PDU at byte @offset. */
ssize_t (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
va_list ap);
/* Expose the device-readable (reply) iovecs; @size is the needed length. */
void (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov, size_t size);
/* Expose the device-writable (request) iovecs. */
void (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov);
/* Publish the completed PDU and notify the guest. */
void (*push_and_notify)(V9fsPDU *pdu);
};
/*
 * Attach transport callbacks to a 9p device state. Must be called exactly
 * once per device; the assert guards against double registration.
 */
static inline int v9fs_register_transport(V9fsState *s,
const struct V9fsTransport *t)
{
assert(!s->transport);
s->transport = t;
return 0; /* always succeeds; int return kept for future error paths */
}
#endif
......@@ -20,7 +20,9 @@
#include "hw/virtio/virtio-access.h"
#include "qemu/iov.h"
void virtio_9p_push_and_notify(V9fsPDU *pdu)
static const struct V9fsTransport virtio_9p_transport;
static void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
......@@ -126,6 +128,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
v9fs_register_transport(s, &virtio_9p_transport);
out:
return;
......@@ -148,8 +151,8 @@ static void virtio_9p_reset(VirtIODevice *vdev)
v9fs_reset(&v->state);
}
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap)
static ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
......@@ -158,8 +161,8 @@ ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
}
ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap)
static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
......@@ -168,22 +171,37 @@ ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
}
void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov, bool is_write)
/* The size parameter is used by other transports. Do not drop it. */
static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov, size_t size)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
if (is_write) {
*piov = elem->out_sg;
*pniov = elem->out_num;
} else {
*piov = elem->in_sg;
*pniov = elem->in_num;
}
*piov = elem->in_sg;
*pniov = elem->in_num;
}
/* Hand back the virtqueue element's out_sg (guest-to-device data) for @pdu. */
static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
*piov = elem->out_sg;
*pniov = elem->out_num;
}
/* virtio implementation of the generic 9p transport hooks */
static const struct V9fsTransport virtio_9p_transport = {
.pdu_vmarshal = virtio_pdu_vmarshal,
.pdu_vunmarshal = virtio_pdu_vunmarshal,
.init_in_iov_from_pdu = virtio_init_in_iov_from_pdu,
.init_out_iov_from_pdu = virtio_init_out_iov_from_pdu,
.push_and_notify = virtio_9p_push_and_notify,
};
/* virtio-9p device */
static const VMStateDescription vmstate_virtio_9p = {
......
......@@ -10,20 +10,10 @@ typedef struct V9fsVirtioState
VirtIODevice parent_obj;
VirtQueue *vq;
size_t config_size;
V9fsPDU pdus[MAX_REQ];
VirtQueueElement *elems[MAX_REQ];
V9fsState state;
} V9fsVirtioState;
void virtio_9p_push_and_notify(V9fsPDU *pdu);
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap);
ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap);
void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
unsigned int *pniov, bool is_write);
#define TYPE_VIRTIO_9P "virtio-9p-device"
#define VIRTIO_9P(obj) \
OBJECT_CHECK(V9fsVirtioState, (obj), TYPE_VIRTIO_9P)
......
......@@ -16,61 +16,53 @@
#include "libqos/virtio-pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/9pfs/9p.h"
static const char mount_tag[] = "qtest";
static char *test_share;
typedef struct {
QVirtioDevice *dev;
QOSState *qs;
QVirtQueue *vq;
char *test_share;
uint16_t p9_req_tag;
} QVirtIO9P;
static QOSState *qvirtio_9p_start(void)
static QVirtIO9P *qvirtio_9p_start(const char *driver)
{
const char *arch = qtest_get_arch();
const char *cmd = "-fsdev local,id=fsdev0,security_model=none,path=%s "
"-device virtio-9p-pci,fsdev=fsdev0,mount_tag=%s";
"-device %s,fsdev=fsdev0,mount_tag=%s";
QVirtIO9P *v9p = g_new0(QVirtIO9P, 1);
test_share = g_strdup("/tmp/qtest.XXXXXX");
g_assert_nonnull(mkdtemp(test_share));
v9p->test_share = g_strdup("/tmp/qtest.XXXXXX");
g_assert_nonnull(mkdtemp(v9p->test_share));
if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
return qtest_pc_boot(cmd, test_share, mount_tag);
}
if (strcmp(arch, "ppc64") == 0) {
return qtest_spapr_boot(cmd, test_share, mount_tag);
v9p->qs = qtest_pc_boot(cmd, v9p->test_share, driver, mount_tag);
} else if (strcmp(arch, "ppc64") == 0) {
v9p->qs = qtest_spapr_boot(cmd, v9p->test_share, driver, mount_tag);
} else {
g_printerr("virtio-9p tests are only available on x86 or ppc64\n");
exit(EXIT_FAILURE);
}
g_printerr("virtio-9p tests are only available on x86 or ppc64\n");
exit(EXIT_FAILURE);
}
static void qvirtio_9p_stop(QOSState *qs)
{
qtest_shutdown(qs);
rmdir(test_share);
g_free(test_share);
return v9p;
}
static void pci_nop(void)
static void qvirtio_9p_stop(QVirtIO9P *v9p)
{
QOSState *qs;
qs = qvirtio_9p_start();
qvirtio_9p_stop(qs);
qtest_shutdown(v9p->qs);
rmdir(v9p->test_share);
g_free(v9p->test_share);
g_free(v9p);
}
typedef struct {
QVirtioDevice *dev;
QOSState *qs;
QVirtQueue *vq;
} QVirtIO9P;
static QVirtIO9P *qvirtio_9p_pci_init(QOSState *qs)
static QVirtIO9P *qvirtio_9p_pci_start(void)
{
QVirtIO9P *v9p;
QVirtioPCIDevice *dev;
v9p = g_new0(QVirtIO9P, 1);
v9p->qs = qs;
dev = qvirtio_pci_device_find(v9p->qs->pcibus, VIRTIO_ID_9P);
QVirtIO9P *v9p = qvirtio_9p_start("virtio-9p-pci");
QVirtioPCIDevice *dev = qvirtio_pci_device_find(v9p->qs->pcibus,
VIRTIO_ID_9P);
g_assert_nonnull(dev);
g_assert_cmphex(dev->vdev.device_type, ==, VIRTIO_ID_9P);
v9p->dev = (QVirtioDevice *) dev;
......@@ -84,26 +76,20 @@ static QVirtIO9P *qvirtio_9p_pci_init(QOSState *qs)
return v9p;
}
static void qvirtio_9p_pci_free(QVirtIO9P *v9p)
static void qvirtio_9p_pci_stop(QVirtIO9P *v9p)
{
qvirtqueue_cleanup(v9p->dev->bus, v9p->vq, v9p->qs->alloc);
qvirtio_pci_device_disable(container_of(v9p->dev, QVirtioPCIDevice, vdev));
g_free(v9p->dev);
g_free(v9p);
qvirtio_9p_stop(v9p);
}
static void pci_basic_config(void)
static void pci_config(QVirtIO9P *v9p)
{
QVirtIO9P *v9p;
size_t tag_len;
size_t tag_len = qvirtio_config_readw(v9p->dev, 0);
char *tag;
int i;
QOSState *qs;
qs = qvirtio_9p_start();
v9p = qvirtio_9p_pci_init(qs);
tag_len = qvirtio_config_readw(v9p->dev, 0);
g_assert_cmpint(tag_len, ==, strlen(mount_tag));
tag = g_malloc(tag_len);
......@@ -112,16 +98,406 @@ static void pci_basic_config(void)
}
g_assert_cmpmem(tag, tag_len, mount_tag, tag_len);
g_free(tag);
}
#define P9_MAX_SIZE 4096 /* Max size of a T-message or R-message */

/* One in-flight 9p request/response pair, staged in guest memory. */
typedef struct {
QVirtIO9P *v9p;
uint16_t tag; /* tag sent in the T-message; reply must echo it */
uint64_t t_msg; /* guest address of the T-message buffer */
uint32_t t_size;
uint64_t r_msg; /* guest address of the R-message buffer */
/* No r_size, it is hardcoded to P9_MAX_SIZE */
size_t t_off; /* write cursor into t_msg */
size_t r_off; /* read cursor into r_msg */
} P9Req;
/* Append @len bytes from @addr to the T-message, advancing the cursor. */
static void v9fs_memwrite(P9Req *req, const void *addr, size_t len)
{
memwrite(req->t_msg + req->t_off, addr, len);
req->t_off += len;
}
/* Skip @len bytes of the R-message without reading them. */
static void v9fs_memskip(P9Req *req, size_t len)
{
req->r_off += len;
}
/* Move the R-message read cursor back @len bytes (used to re-read a header). */
static void v9fs_memrewind(P9Req *req, size_t len)
{
req->r_off -= len;
}
/* Copy @len bytes of the R-message into @addr, advancing the cursor. */
static void v9fs_memread(P9Req *req, void *addr, size_t len)
{
memread(req->r_msg + req->r_off, addr, len);
req->r_off += len;
}
/* Write a 16-bit value in 9p wire order (little-endian). */
static void v9fs_uint16_write(P9Req *req, uint16_t val)
{
uint16_t le_val = cpu_to_le16(val);
v9fs_memwrite(req, &le_val, 2);
}
/* Read a little-endian 16-bit value and convert to host order. */
static void v9fs_uint16_read(P9Req *req, uint16_t *val)
{
v9fs_memread(req, val, 2);
le16_to_cpus(val);
}
/* Write a 32-bit value in 9p wire order (little-endian). */
static void v9fs_uint32_write(P9Req *req, uint32_t val)
{
uint32_t le_val = cpu_to_le32(val);
v9fs_memwrite(req, &le_val, 4);
}
/* Read a little-endian 32-bit value and convert to host order. */
static void v9fs_uint32_read(P9Req *req, uint32_t *val)
{
v9fs_memread(req, val, 4);
le32_to_cpus(val);
}
/* len[2] string[len] */
/*
 * On-the-wire size of a 9p string: 2-byte length prefix plus the bytes
 * (no NUL terminator on the wire).
 * NOTE(review): return type is uint16_t, so len == UINT16_MAX would wrap
 * 2 + len to 1 — the assert permits that edge case; confirm callers never
 * pass a 64KiB string.
 */
static uint16_t v9fs_string_size(const char *string)
{
size_t len = strlen(string);
g_assert_cmpint(len, <=, UINT16_MAX);
return 2 + len;
}
/* Write a 9p string: 2-byte little-endian length followed by the bytes. */
static void v9fs_string_write(P9Req *req, const char *string)
{
int len = strlen(string);
g_assert_cmpint(len, <=, UINT16_MAX);
v9fs_uint16_write(req, (uint16_t) len);
v9fs_memwrite(req, string, len);
}
/*
 * Read a 9p string. @len and @string are optional outputs; if @string is
 * NULL the payload is skipped. The returned buffer is g_malloc'ed and NOT
 * NUL-terminated (wire strings carry no terminator); caller must g_free().
 */
static void v9fs_string_read(P9Req *req, uint16_t *len, char **string)
{
uint16_t local_len;
v9fs_uint16_read(req, &local_len);
if (len) {
*len = local_len;
}
if (string) {
*string = g_malloc(local_len);
v9fs_memread(req, *string, local_len);
} else {
v9fs_memskip(req, local_len);
}
}
/* Common 7-byte 9p message header: size[4] id[1] tag[2], little-endian. */
typedef struct {
uint32_t size;
uint8_t id;
uint16_t tag;
} QEMU_PACKED P9Hdr;
static P9Req *v9fs_req_init(QVirtIO9P *v9p, uint32_t size, uint8_t id,
uint16_t tag)
{
P9Req *req = g_new0(P9Req, 1);
uint32_t t_size = 7 + size; /* 9P header has well-known size of 7 bytes */
P9Hdr hdr = {
.size = cpu_to_le32(t_size),
.id = id,
.tag = cpu_to_le16(tag)
};
g_assert_cmpint(t_size, <=, P9_MAX_SIZE);
qvirtio_9p_pci_free(v9p);
qvirtio_9p_stop(qs);
req->v9p = v9p;
req->t_size = t_size;
req->t_msg = guest_alloc(v9p->qs->alloc, req->t_size);
v9fs_memwrite(req, &hdr, 7);
req->tag = tag;
return req;
}
/*
 * Queue the T-message and a P9_MAX_SIZE reply buffer on the virtqueue and
 * kick the device. Resets the write cursor so the buffers could be reused.
 */
static void v9fs_req_send(P9Req *req)
{
QVirtIO9P *v9p = req->v9p;
uint32_t free_head;
req->r_msg = guest_alloc(v9p->qs->alloc, P9_MAX_SIZE);
/* out descriptor (request), chained to an in descriptor (reply) */
free_head = qvirtqueue_add(v9p->vq, req->t_msg, req->t_size, false, true);
qvirtqueue_add(v9p->vq, req->r_msg, P9_MAX_SIZE, true, false);
qvirtqueue_kick(v9p->dev, v9p->vq, free_head);
req->t_off = 0;
}
/*
 * Wait for the reply and validate its header: size/tag sanity, matching
 * tag, and expected message @id. Polls the ISR up to 10 times (1s timeout
 * each), rewinding the cursor until a plausible header (size >= 7) shows
 * up. If the server answered Rlerror instead of @id, print the error code
 * and abort the test.
 */
static void v9fs_req_recv(P9Req *req, uint8_t id)
{
QVirtIO9P *v9p = req->v9p;
P9Hdr hdr;
int i;
for (i = 0; i < 10; i++) {
qvirtio_wait_queue_isr(v9p->dev, v9p->vq, 1000 * 1000);
v9fs_memread(req, &hdr, 7);
le32_to_cpus(&hdr.size);
le16_to_cpus(&hdr.tag);
if (hdr.size >= 7) {
break;
}
v9fs_memrewind(req, 7);
}
g_assert_cmpint(hdr.size, >=, 7);
g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE);
g_assert_cmpint(hdr.tag, ==, req->tag);
if (hdr.id != id && hdr.id == P9_RLERROR) {
uint32_t err;
v9fs_uint32_read(req, &err);
g_printerr("Received Rlerror (%d) instead of Response %d\n", err, id);
g_assert_not_reached();
}
g_assert_cmpint(hdr.id, ==, id);
}
/* Release both guest buffers and the request bookkeeping struct. */
static void v9fs_req_free(P9Req *req)
{
QVirtIO9P *v9p = req->v9p;
guest_free(v9p->qs->alloc, req->t_msg);
guest_free(v9p->qs->alloc, req->r_msg);
g_free(req);
}
/* size[4] Rlerror tag[2] ecode[4] */
/* Receive an expected Rlerror reply and return its error code; frees @req. */
static void v9fs_rlerror(P9Req *req, uint32_t *err)
{
v9fs_req_recv(req, P9_RLERROR);
v9fs_uint32_read(req, err);
v9fs_req_free(req);
}
/* size[4] Tversion tag[2] msize[4] version[s] */
/* Send Tversion (always tagged P9_NOTAG per the protocol); returns the req. */
static P9Req *v9fs_tversion(QVirtIO9P *v9p, uint32_t msize, const char *version)
{
P9Req *req = v9fs_req_init(v9p, 4 + v9fs_string_size(version), P9_TVERSION,
P9_NOTAG);
v9fs_uint32_write(req, msize);
v9fs_string_write(req, version);
v9fs_req_send(req);
return req;
}
/* size[4] Rversion tag[2] msize[4] version[s] */
/*
 * Receive Rversion; asserts the negotiated msize equals P9_MAX_SIZE.
 * @len/@version are optional outputs (see v9fs_string_read); frees @req.
 */
static void v9fs_rversion(P9Req *req, uint16_t *len, char **version)
{
uint32_t msize;
v9fs_req_recv(req, P9_RVERSION);
v9fs_uint32_read(req, &msize);
g_assert_cmpint(msize, ==, P9_MAX_SIZE);
if (len || version) {
v9fs_string_read(req, len, version);
}
v9fs_req_free(req);
}
/* size[4] Tattach tag[2] fid[4] afid[4] uname[s] aname[s] n_uname[4] */
/*
 * Send Tattach for @fid with no authentication fid (P9_NOFID) and empty
 * uname/aname strings; uses the next sequential request tag.
 */
static P9Req *v9fs_tattach(QVirtIO9P *v9p, uint32_t fid, uint32_t n_uname)
{
const char *uname = ""; /* ignored by QEMU */
const char *aname = ""; /* ignored by QEMU */
/* 4 (fid) + 4 (afid) + 2 + 2 (two empty strings) + 4 (n_uname) */
P9Req *req = v9fs_req_init(v9p, 4 + 4 + 2 + 2 + 4, P9_TATTACH,
++(v9p->p9_req_tag));
v9fs_uint32_write(req, fid);
v9fs_uint32_write(req, P9_NOFID);
v9fs_string_write(req, uname);
v9fs_string_write(req, aname);
v9fs_uint32_write(req, n_uname);
v9fs_req_send(req);
return req;
}
/* A 9p qid is 13 bytes on the wire: type[1] version[4] path[8]. */
typedef char v9fs_qid[13];

/* size[4] Rattach tag[2] qid[13] */
/* Receive Rattach; optionally copy out the root qid. Frees @req. */
static void v9fs_rattach(P9Req *req, v9fs_qid *qid)
{
v9fs_req_recv(req, P9_RATTACH);
if (qid) {
v9fs_memread(req, qid, 13);
}
v9fs_req_free(req);
}
/* size[4] Twalk tag[2] fid[4] newfid[4] nwname[2] nwname*(wname[s]) */
/* Send Twalk from @fid to @newfid through @nwname path elements. */
static P9Req *v9fs_twalk(QVirtIO9P *v9p, uint32_t fid, uint32_t newfid,
uint16_t nwname, char *const wnames[])
{
P9Req *req;
int i;
uint32_t size = 4 + 4 + 2; /* fid + newfid + nwname */
/* variable part: one length-prefixed string per path element */
for (i = 0; i < nwname; i++) {
size += v9fs_string_size(wnames[i]);
}
req = v9fs_req_init(v9p, size, P9_TWALK, ++(v9p->p9_req_tag));
v9fs_uint32_write(req, fid);
v9fs_uint32_write(req, newfid);
v9fs_uint16_write(req, nwname);
for (i = 0; i < nwname; i++) {
v9fs_string_write(req, wnames[i]);
}
v9fs_req_send(req);
return req;
}
/* size[4] Rwalk tag[2] nwqid[2] nwqid*(wqid[13]) */
/*
 * Receive Rwalk. @nwqid and @wqid are optional outputs; *wqid is a
 * g_malloc'ed array of nwqid qids the caller must g_free(). Frees @req.
 */
static void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid)
{
uint16_t local_nwqid;
v9fs_req_recv(req, P9_RWALK);
v9fs_uint16_read(req, &local_nwqid);
if (nwqid) {
*nwqid = local_nwqid;
}
if (wqid) {
*wqid = g_malloc(local_nwqid * 13);
v9fs_memread(req, *wqid, local_nwqid * 13);
}
v9fs_req_free(req);
}
/* Test: Tversion/Rversion round-trip negotiating "9P2000.L". */
static void fs_version(QVirtIO9P *v9p)
{
const char *version = "9P2000.L";
uint16_t server_len;
char *server_version;
P9Req *req;
req = v9fs_tversion(v9p, P9_MAX_SIZE, version);
v9fs_rversion(req, &server_len, &server_version);
/* server must echo the exact version string back */
g_assert_cmpmem(server_version, server_len, version, strlen(version));
g_free(server_version);
}
/* Test: version negotiation followed by Tattach of fid 0 as current uid. */
static void fs_attach(QVirtIO9P *v9p)
{
P9Req *req;
fs_version(v9p); /* attach requires a negotiated session */
req = v9fs_tattach(v9p, 0, getuid());
v9fs_rattach(req, NULL);
}
/*
 * Test: walk through the maximum P9_MAXWELEM path elements. Creates a
 * nested directory chain under the shared dir, walks it in one Twalk,
 * and checks one qid comes back per element. Cleans up deepest-first.
 */
static void fs_walk(QVirtIO9P *v9p)
{
char *wnames[P9_MAXWELEM], *paths[P9_MAXWELEM];
char *last_path = v9p->test_share;
uint16_t nwqid;
v9fs_qid *wqid;
int i;
P9Req *req;
for (i = 0; i < P9_MAXWELEM; i++) {
wnames[i] = g_strdup_printf("%s%d", __func__, i);
last_path = paths[i] = g_strdup_printf("%s/%s", last_path, wnames[i]);
g_assert(!mkdir(paths[i], 0700));
}
fs_attach(v9p);
req = v9fs_twalk(v9p, 0, 1, P9_MAXWELEM, wnames);
v9fs_rwalk(req, &nwqid, &wqid);
g_assert_cmpint(nwqid, ==, P9_MAXWELEM);
/* remove innermost directories first so each rmdir finds an empty dir */
for (i = 0; i < P9_MAXWELEM; i++) {
rmdir(paths[P9_MAXWELEM - i - 1]);
g_free(paths[P9_MAXWELEM - i - 1]);
g_free(wnames[i]);
}
g_free(wqid);
}
/*
 * Test: a path element containing '/' must be rejected; the server is
 * expected to answer Rlerror with ENOENT rather than walk it.
 */
static void fs_walk_no_slash(QVirtIO9P *v9p)
{
char *const wnames[] = { g_strdup(" /") };
P9Req *req;
uint32_t err;
fs_attach(v9p);
req = v9fs_twalk(v9p, 0, 1, 1, wnames);
v9fs_rlerror(req, &err);
g_assert_cmpint(err, ==, ENOENT);
g_free(wnames[0]);
}
/*
 * Test: walking ".." from the export root must not escape the shared
 * directory — the returned qid must equal the root's own qid.
 */
static void fs_walk_dotdot(QVirtIO9P *v9p)
{
char *const wnames[] = { g_strdup("..") };
v9fs_qid root_qid, *wqid;
P9Req *req;
fs_version(v9p);
req = v9fs_tattach(v9p, 0, getuid());
v9fs_rattach(req, &root_qid);
req = v9fs_twalk(v9p, 0, 1, 1, wnames);
v9fs_rwalk(req, NULL, &wqid); /* We know we'll get one qid */
g_assert_cmpmem(&root_qid, 13, wqid[0], 13);
g_free(wqid);
g_free(wnames[0]);
}
/* Signature shared by all 9p test bodies. */
typedef void (*v9fs_test_fn)(QVirtIO9P *v9p);

/*
 * Test harness: boot a virtio-9p-pci guest, run the test body passed as
 * @data (NULL means a bare start/stop "nop" test), then tear down.
 */
static void v9fs_run_pci_test(gconstpointer data)
{
v9fs_test_fn fn = data;
QVirtIO9P *v9p = qvirtio_9p_pci_start();
if (fn) {
fn(v9p);
}
qvirtio_9p_pci_stop(v9p);
}
/* Register @fn under @path, wrapped in the PCI start/stop harness. */
static void v9fs_qtest_pci_add(const char *path, v9fs_test_fn fn)
{
qtest_add_data_func(path, fn, v9fs_run_pci_test);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
qtest_add_func("/virtio/9p/pci/nop", pci_nop);
qtest_add_func("/virtio/9p/pci/basic/configuration", pci_basic_config);
v9fs_qtest_pci_add("/virtio/9p/pci/nop", NULL);
v9fs_qtest_pci_add("/virtio/9p/pci/config", pci_config);
v9fs_qtest_pci_add("/virtio/9p/pci/fs/version/basic", fs_version);
v9fs_qtest_pci_add("/virtio/9p/pci/fs/attach/basic", fs_attach);
v9fs_qtest_pci_add("/virtio/9p/pci/fs/walk/basic", fs_walk);
v9fs_qtest_pci_add("/virtio/9p/pci/fs/walk/no_slash", fs_walk_no_slash);
v9fs_qtest_pci_add("/virtio/9p/pci/fs/walk/dotdot_from_root",
fs_walk_dotdot);
return g_test_run();
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册