Commit 853d0289 authored by David Vrabel

xen/grant-table: pre-populate kernel unmap ops for xen_gnttab_unmap_refs()

When unmapping grants, instead of converting the kernel map ops to
unmap ops on the fly, pre-populate the set of unmap ops.

This allows the grant unmap for the kernel mappings to be trivially
batched in the future.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Parent d8ac3dd4
@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
         struct page **pages, unsigned int count);
 extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-        struct gnttab_map_grant_ref *kmap_ops,
+        struct gnttab_unmap_grant_ref *kunmap_ops,
         struct page **pages, unsigned int count);
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
......
@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-        struct gnttab_map_grant_ref *kmap_ops,
+        struct gnttab_unmap_grant_ref *kunmap_ops,
         struct page **pages, unsigned int count)
 {
         int i;
......
@@ -55,7 +55,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
         struct gnttab_map_grant_ref *kmap_ops,
         struct page **pages, unsigned int count);
 extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-        struct gnttab_map_grant_ref *kmap_ops,
+        struct gnttab_unmap_grant_ref *kunmap_ops,
         struct page **pages, unsigned int count);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
......
@@ -816,7 +816,7 @@ static struct page *m2p_find_override(unsigned long mfn)
 }
 static int m2p_remove_override(struct page *page,
-        struct gnttab_map_grant_ref *kmap_op,
+        struct gnttab_unmap_grant_ref *kunmap_op,
         unsigned long mfn)
 {
         unsigned long flags;
@@ -840,7 +840,7 @@ static int m2p_remove_override(struct page *page,
         list_del(&page->lru);
         spin_unlock_irqrestore(&m2p_override_lock, flags);
-        if (kmap_op != NULL) {
+        if (kunmap_op != NULL) {
                 if (!PageHighMem(page)) {
                         struct multicall_space mcs;
                         struct gnttab_unmap_and_replace *unmap_op;
@@ -855,13 +855,13 @@ static int m2p_remove_override(struct page *page,
                          * issued. In this case handle is going to -1 because
                          * it hasn't been modified yet.
                          */
-                        if (kmap_op->handle == -1)
+                        if (kunmap_op->handle == -1)
                                 xen_mc_flush();
                         /*
                          * Now if kmap_op->handle is negative it means that the
                          * hypercall actually returned an error.
                          */
-                        if (kmap_op->handle == GNTST_general_error) {
+                        if (kunmap_op->handle == GNTST_general_error) {
                                 pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
                                         pfn, mfn);
                                 put_balloon_scratch_page();
@@ -873,9 +873,9 @@ static int m2p_remove_override(struct page *page,
                         mcs = __xen_mc_entry(
                                 sizeof(struct gnttab_unmap_and_replace));
                         unmap_op = mcs.args;
-                        unmap_op->host_addr = kmap_op->host_addr;
+                        unmap_op->host_addr = kunmap_op->host_addr;
                         unmap_op->new_addr = scratch_page_address;
-                        unmap_op->handle = kmap_op->handle;
+                        unmap_op->handle = kunmap_op->handle;
                         MULTI_grant_table_op(mcs.mc,
                                 GNTTABOP_unmap_and_replace, unmap_op, 1);
@@ -887,7 +887,6 @@ static int m2p_remove_override(struct page *page,
                         xen_mc_issue(PARAVIRT_LAZY_MMU);
-                        kmap_op->host_addr = 0;
                         put_balloon_scratch_page();
                 }
         }
@@ -912,7 +911,7 @@ static int m2p_remove_override(struct page *page,
 }
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-        struct gnttab_map_grant_ref *kmap_ops,
+        struct gnttab_unmap_grant_ref *kunmap_ops,
         struct page **pages, unsigned int count)
 {
         int i, ret = 0;
@@ -921,7 +920,7 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
         if (xen_feature(XENFEAT_auto_translated_physmap))
                 return 0;
-        if (kmap_ops &&
+        if (kunmap_ops &&
             !in_interrupt() &&
             paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                 arch_enter_lazy_mmu_mode();
@@ -942,8 +941,8 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                 ClearPagePrivate(pages[i]);
                 set_phys_to_machine(pfn, pages[i]->index);
-                if (kmap_ops)
-                        ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
+                if (kunmap_ops)
+                        ret = m2p_remove_override(pages[i], &kunmap_ops[i], mfn);
                 if (ret)
                         goto out;
         }
......
@@ -91,6 +91,7 @@ struct grant_map {
         struct gnttab_map_grant_ref *map_ops;
         struct gnttab_unmap_grant_ref *unmap_ops;
         struct gnttab_map_grant_ref *kmap_ops;
+        struct gnttab_unmap_grant_ref *kunmap_ops;
         struct page **pages;
 };
@@ -124,6 +125,7 @@ static void gntdev_free_map(struct grant_map *map)
         kfree(map->map_ops);
         kfree(map->unmap_ops);
         kfree(map->kmap_ops);
+        kfree(map->kunmap_ops);
         kfree(map);
 }
@@ -140,11 +142,13 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
         add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
         add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
         add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+        add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
         add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
         if (NULL == add->grants ||
             NULL == add->map_ops ||
             NULL == add->unmap_ops ||
             NULL == add->kmap_ops ||
+            NULL == add->kunmap_ops ||
             NULL == add->pages)
                 goto err;
@@ -155,6 +159,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
                 add->map_ops[i].handle = -1;
                 add->unmap_ops[i].handle = -1;
                 add->kmap_ops[i].handle = -1;
+                add->kunmap_ops[i].handle = -1;
         }
         add->index = 0;
@@ -280,6 +285,8 @@ static int map_grant_pages(struct grant_map *map)
                                 map->flags | GNTMAP_host_map,
                                 map->grants[i].ref,
                                 map->grants[i].domid);
+                        gnttab_set_unmap_op(&map->kunmap_ops[i], address,
+                                map->flags | GNTMAP_host_map, -1);
                 }
         }
@@ -290,13 +297,14 @@ static int map_grant_pages(struct grant_map *map)
                 return err;
         for (i = 0; i < map->count; i++) {
-                if (map->map_ops[i].status)
+                if (map->map_ops[i].status) {
                         err = -EINVAL;
-                else {
-                        BUG_ON(map->map_ops[i].handle == -1);
-                        map->unmap_ops[i].handle = map->map_ops[i].handle;
-                        pr_debug("map handle=%d\n", map->map_ops[i].handle);
+                        continue;
                 }
+                map->unmap_ops[i].handle = map->map_ops[i].handle;
+                if (use_ptemod)
+                        map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
         }
         return err;
 }
@@ -316,7 +324,7 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
         }
         err = gnttab_unmap_refs(map->unmap_ops + offset,
-                use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
+                use_ptemod ? map->kunmap_ops + offset : NULL, map->pages + offset,
                 pages);
         if (err)
                 return err;
......
@@ -738,7 +738,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-        struct gnttab_map_grant_ref *kmap_ops,
+        struct gnttab_unmap_grant_ref *kunmap_ops,
         struct page **pages, unsigned int count)
 {
         int ret;
@@ -747,7 +747,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
         if (ret)
                 return ret;
-        return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
+        return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
......
@@ -167,7 +167,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                     struct gnttab_map_grant_ref *kmap_ops,
                     struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-                      struct gnttab_map_grant_ref *kunmap_ops,
+                      struct gnttab_unmap_grant_ref *kunmap_ops,
                       struct page **pages, unsigned int count);
 /* Perform a batch of grant map/copy operations. Retry every batch slot
......