Commit 599c9cb6 authored by Stefan Hajnoczi

Merge remote-tracking branch 'sstabellini/tags/xen-20170516-tag' into staging

Xen 2017/05/16

# gpg: Signature made Tue 16 May 2017 08:18:32 PM BST
# gpg:                using RSA key 0x894F8F4870E1AE90
# gpg: Good signature from "Stefano Stabellini <stefano.stabellini@eu.citrix.com>"
# gpg:                 aka "Stefano Stabellini <sstabellini@kernel.org>"
# Primary key fingerprint: D04E 33AB A51F 67BA 07D3  0AEA 894F 8F48 70E1 AE90

* sstabellini/tags/xen-20170516-tag:
  xen: call qemu_set_cloexec instead of fcntl
  xen/9pfs: fix two resource leaks on error paths, discovered by Coverity
  configure: Remove -lxencall for Xen detection
  xen/mapcache: store dma information in revmapcache entries for debugging
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
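The first patch on the list replaces raw fcntl(F_SETFD, FD_CLOEXEC) calls (visible in two hunks further down) with QEMU's qemu_set_cloexec() helper. As a rough sketch of why that matters (this is an approximation for illustration, not QEMU's actual implementation): a helper of this kind reads the current descriptor flags first, so it only adds close-on-exec instead of clobbering flags that are already set, and it checks for errors rather than ignoring the fcntl return value.

/* Sketch only: approximate behaviour of a qemu_set_cloexec()-style helper. */
#include <assert.h>
#include <fcntl.h>

static void set_cloexec(int fd)
{
    int flags = fcntl(fd, F_GETFD);                 /* read the current fd flags */
    assert(flags != -1);
    flags = fcntl(fd, F_SETFD, flags | FD_CLOEXEC); /* add close-on-exec only */
    assert(flags != -1);
}

int main(void)
{
    set_cloexec(0);   /* e.g. mark stdin close-on-exec in this toy example */
    return 0;
}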
@@ -2015,7 +2015,7 @@ if test "$xen" != "no" ; then
 else
   xen_libs="-lxenstore -lxenctrl -lxenguest"
-  xen_stable_libs="-lxencall -lxenforeignmemory -lxengnttab -lxenevtchn"
+  xen_stable_libs="-lxenforeignmemory -lxengnttab -lxenevtchn"
   # First we test whether Xen headers and libraries are available.
   # If no, we are done and there is no Xen support.
@@ -2084,10 +2084,10 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
          * In that case just map until the end of the page.
          */
         if (block->offset == 0) {
-            return xen_map_cache(addr, 0, 0);
+            return xen_map_cache(addr, 0, 0, false);
         }
-        block->host = xen_map_cache(block->offset, block->max_length, 1);
+        block->host = xen_map_cache(block->offset, block->max_length, 1, false);
     }
     return ramblock_ptr(block, addr);
 }
@@ -2117,10 +2117,10 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
          * In that case just map the requested area.
          */
         if (block->offset == 0) {
-            return xen_map_cache(addr, *size, 1);
+            return xen_map_cache(addr, *size, 1, true);
         }
-        block->host = xen_map_cache(block->offset, block->max_length, 1);
+        block->host = xen_map_cache(block->offset, block->max_length, 1, true);
     }
     return ramblock_ptr(block, addr);
@@ -332,12 +332,14 @@ static int xen_9pfs_connect(struct XenDevice *xendev)
         str = g_strdup_printf("ring-ref%u", i);
         if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                  &xen_9pdev->rings[i].ref) == -1) {
+            g_free(str);
             goto out;
         }
         g_free(str);
         str = g_strdup_printf("event-channel-%u", i);
         if (xenstore_read_fe_int(&xen_9pdev->xendev, str,
                                  &xen_9pdev->rings[i].evtchn) == -1) {
+            g_free(str);
             goto out;
         }
         g_free(str);
@@ -378,7 +380,7 @@ static int xen_9pfs_connect(struct XenDevice *xendev)
         if (xen_9pdev->rings[i].evtchndev == NULL) {
             goto out;
         }
-        fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC);
+        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
         xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
                                          (xen_9pdev->rings[i].evtchndev,
                                           xendev->dom,
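The two added g_free(str) calls above are the Coverity-reported leak fixes: the string allocated by g_strdup_printf() was never released when xenstore_read_fe_int() failed and control jumped to the out label. A minimal, self-contained sketch of that free-before-goto pattern follows; read_key() is a made-up stand-in for the real xenstore helper.

/* Sketch of the free-before-goto pattern; read_key() is hypothetical. */
#include <glib.h>
#include <stdio.h>

static int read_key(const char *key, int *value)
{
    (void)key;
    *value = 42;     /* pretend the key was found; return -1 to exercise the error path */
    return 0;
}

int main(void)
{
    int ret = -1;
    int ref;
    char *str = g_strdup_printf("ring-ref%u", 0u);

    if (read_key(str, &ref) == -1) {
        g_free(str); /* the fix: release the string on the error path too */
        goto out;
    }
    g_free(str);
    printf("ring-ref0 = %d\n", ref);
    ret = 0;
out:
    return ret;
}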
@@ -62,6 +62,7 @@ typedef struct MapCacheRev {
     hwaddr paddr_index;
     hwaddr size;
     QTAILQ_ENTRY(MapCacheRev) next;
+    bool dma;
 } MapCacheRev;
 typedef struct MapCache {
@@ -202,7 +203,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
 }
 static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
-                                       uint8_t lock)
+                                       uint8_t lock, bool dma)
 {
     MapCacheEntry *entry, *pentry = NULL;
     hwaddr address_index;
@@ -289,6 +290,7 @@ tryagain:
     if (lock) {
         MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
         entry->lock++;
+        reventry->dma = dma;
         reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
         reventry->paddr_index = mapcache->last_entry->paddr_index;
         reventry->size = entry->size;
@@ -300,12 +302,12 @@ tryagain:
 }
 uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
-                       uint8_t lock)
+                       uint8_t lock, bool dma)
 {
     uint8_t *p;
     mapcache_lock();
-    p = xen_map_cache_unlocked(phys_addr, size, lock);
+    p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
     mapcache_unlock();
     return p;
 }
@@ -426,8 +428,11 @@ void xen_invalidate_map_cache(void)
     mapcache_lock();
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
-        DPRINTF("There should be no locked mappings at this time, "
-                "but "TARGET_FMT_plx" -> %p is present\n",
-                reventry->paddr_index, reventry->vaddr_req);
+        if (!reventry->dma) {
+            continue;
+        }
+        fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
+                " "TARGET_FMT_plx" -> %p is present\n",
+                reventry->paddr_index, reventry->vaddr_req);
     }
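The mapcache change above is the debugging aid named in the tag: each locked reverse-map entry now records whether it was created for DMA. The exec.c hunks earlier supply that information, passing dma=true from qemu_ram_ptr_length(), which is used by address_space_map() and hence the DMA path, and false from qemu_map_ram_ptr(); a full cache invalidation then only complains about DMA mappings that are still locked. The following is a simplified, self-contained illustration of that bookkeeping, not QEMU's real data structures.

/* Illustrative sketch only: locked mappings tagged with a dma flag, and an
 * invalidation pass that warns only about still-locked DMA mappings. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rev_entry {
    unsigned long paddr_index;   /* which bucket the mapping came from */
    void *vaddr_req;             /* pointer handed back to the caller */
    bool dma;                    /* was this mapping created for DMA? */
    struct rev_entry *next;
};

static struct rev_entry *locked_entries;

static void *map_locked(unsigned long paddr_index, bool dma)
{
    struct rev_entry *e = calloc(1, sizeof(*e));
    e->paddr_index = paddr_index;
    e->vaddr_req = malloc(4096); /* stand-in for the real mapping */
    e->dma = dma;
    e->next = locked_entries;
    locked_entries = e;
    return e->vaddr_req;
}

static void invalidate_all(void)
{
    for (struct rev_entry *e = locked_entries; e; e = e->next) {
        if (!e->dma) {
            continue;            /* non-DMA locks are tolerated */
        }
        fprintf(stderr, "Locked DMA mapping while invalidating: %lx -> %p\n",
                e->paddr_index, e->vaddr_req);
    }
}

int main(void)
{
    map_locked(0x10, false);     /* e.g. a long-lived, non-DMA mapping */
    map_locked(0x20, true);      /* a DMA mapping that was never unmapped */
    invalidate_all();            /* warns only about the DMA one */
    return 0;
}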
@@ -147,7 +147,7 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
         qdev_unplug(DEVICE(xendev), NULL);
         return NULL;
     }
-    fcntl(xenevtchn_fd(xendev->evtchndev), F_SETFD, FD_CLOEXEC);
+    qemu_set_cloexec(xenevtchn_fd(xendev->evtchndev));
     if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) {
         xendev->gnttabdev = xengnttab_open(NULL, 0);
@@ -17,7 +17,7 @@ typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr start_addr,
 void xen_map_cache_init(phys_offset_to_gaddr_t f,
                         void *opaque);
 uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
-                       uint8_t lock);
+                       uint8_t lock, bool dma);
 ram_addr_t xen_ram_addr_from_mapcache(void *ptr);
 void xen_invalidate_map_cache_entry(uint8_t *buffer);
 void xen_invalidate_map_cache(void);
@@ -31,7 +31,8 @@ static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
 static inline uint8_t *xen_map_cache(hwaddr phys_addr,
                                      hwaddr size,
-                                     uint8_t lock)
+                                     uint8_t lock,
+                                     bool dma)
 {
     abort();
 }
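The second hunk of the header updates the stub that is built when Xen support is compiled out: xen_map_cache() is still declared so callers compile, but reaching it at runtime would be a bug, hence the abort(). A small hedged sketch of that general pattern, using a hypothetical HAVE_FOO guard and foo_do_work() name rather than QEMU's real configury:

/* Sketch of the compile-time stub pattern; HAVE_FOO and foo_do_work() are
 * hypothetical names, not part of QEMU. */
#include <stdlib.h>

#ifdef HAVE_FOO
int foo_do_work(int arg);                /* real implementation elsewhere */
#else
static inline int foo_do_work(int arg)
{
    (void)arg;
    abort();                             /* must never be reached when foo is disabled */
}
#endif

int main(void)
{
#ifdef HAVE_FOO
    return foo_do_work(1);
#else
    return 0;                            /* the stub is never called in this sketch */
#endif
}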