diff --git a/docs/rdma.txt b/docs/rdma.txt
index 1f5d9e9fe4e2e5e84ea5922be435bb68d96909de..2bdd0a5be0b17156fb21308c401802de24166a10 100644
--- a/docs/rdma.txt
+++ b/docs/rdma.txt
@@ -18,7 +18,7 @@ Contents:
 * RDMA Migration Protocol Description
 * Versioning and Capabilities
 * QEMUFileRDMA Interface
-* Migration of pc.ram
+* Migration of VM's ram
 * Error handling
 * TODO
 
@@ -149,7 +149,7 @@ The only difference between a SEND message and an RDMA
 message is that SEND messages cause notifications
 to be posted to the completion queue (CQ) on the
 infiniband receiver side, whereas RDMA messages (used
-for pc.ram) do not (to behave like an actual DMA).
+for VM's ram) do not (to behave like an actual DMA).
 
 Messages in infiniband require two things:
 
@@ -355,7 +355,7 @@ If the buffer is empty, then we follow the same steps
 listed above and issue another "QEMU File" protocol command,
 asking for a new SEND message to re-fill the buffer.
 
-Migration of pc.ram:
+Migration of VM's ram:
 ====================
 
 At the beginning of the migration, (migration-rdma.c),
diff --git a/migration-rdma.c b/migration-rdma.c
index d99812c4519d70e6ef7446d6bbc214f42850ee85..b32dbdfccd3ebbdfa4b3a6d7e4c3f48dbd7dc16b 100644
--- a/migration-rdma.c
+++ b/migration-rdma.c
@@ -2523,7 +2523,7 @@ static void *qemu_rdma_data_init(const char *host_port, Error **errp)
 /*
  * QEMUFile interface to the control channel.
  * SEND messages for control only.
- * pc.ram is handled with regular RDMA messages.
+ * VM's ram is handled with regular RDMA messages.
  */
 static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
                                 int64_t pos, int size)
@@ -2539,7 +2539,7 @@ static int qemu_rdma_put_buffer(void *opaque, const uint8_t *buf,
 
     /*
      * Push out any writes that
-     * we're queued up for pc.ram.
+     * we're queued up for VM's ram.
      */
     ret = qemu_rdma_write_flush(f, rdma);
     if (ret < 0) {
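
Note (not part of the patch): the docs/rdma.txt text touched by the second hunk
distinguishes SEND messages (control channel, completions visible on the
infiniband receiver) from RDMA messages (the VM's ram, no receiver-side
notification). As a rough illustration of that distinction, here is a minimal
libibverbs sketch, assuming a connected queue pair and pre-registered memory;
the helper names, qp/buf/lkey/remote_addr/rkey parameters are placeholders and
do not come from migration-rdma.c.

#include <infiniband/verbs.h>
#include <string.h>

/* Control-channel style message: the peer must have posted a receive,
 * and it gets a completion on its CQ when the data arrives. */
static int post_control_send(struct ibv_qp *qp, void *buf,
                             uint32_t len, uint32_t lkey)
{
    struct ibv_sge sge = {
        .addr   = (uintptr_t) buf,
        .length = len,
        .lkey   = lkey,
    };
    struct ibv_send_wr wr, *bad_wr = NULL;

    memset(&wr, 0, sizeof(wr));
    wr.opcode     = IBV_WR_SEND;        /* consumed by a posted receive */
    wr.send_flags = IBV_SEND_SIGNALED;  /* sender also gets a completion */
    wr.sg_list    = &sge;
    wr.num_sge    = 1;

    return ibv_post_send(qp, &wr, &bad_wr);
}

/* RAM-style message: the data lands directly in the peer's registered
 * memory at remote_addr; no receive is posted and no notification is
 * raised on the receiver, which is the "actual DMA" behaviour the doc
 * describes. */
static int post_ram_write(struct ibv_qp *qp, void *buf, uint32_t len,
                          uint32_t lkey, uint64_t remote_addr, uint32_t rkey)
{
    struct ibv_sge sge = {
        .addr   = (uintptr_t) buf,
        .length = len,
        .lkey   = lkey,
    };
    struct ibv_send_wr wr, *bad_wr = NULL;

    memset(&wr, 0, sizeof(wr));
    wr.opcode              = IBV_WR_RDMA_WRITE;
    wr.send_flags          = IBV_SEND_SIGNALED; /* sender-side completion only */
    wr.sg_list             = &sge;
    wr.num_sge             = 1;
    wr.wr.rdma.remote_addr = remote_addr;
    wr.wr.rdma.rkey        = rkey;

    return ibv_post_send(qp, &wr, &bad_wr);
}

The only structural difference between the two work requests is the opcode and
the extra remote_addr/rkey pair, which is why the doc can describe SEND and
RDMA messages as sharing one framing while differing only in whether the
receiver is notified.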