... and use it where suitable (the tmem caller doesn't propagate an error
code). While it doesn't matter as much, also make donate_page() follow suit
on x86 (on ARM it already returns -ENOSYS). Also move their declarations to
common code and add __must_check.

Signed-off-by: Jan Beulich

--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1090,7 +1090,7 @@ int donate_page(struct domain *d, struct
 int steal_page(
     struct domain *d, struct page_info *page, unsigned int memflags)
 {
-    return -1;
+    return -EOPNOTSUPP;
 }
 
 int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4424,7 +4424,7 @@ int donate_page(
              page_to_mfn(page), d->domain_id,
              owner ? owner->domain_id : DOMID_INVALID,
              page->count_info, page->u.inuse.type_info);
-    return -1;
+    return -EINVAL;
 }
 
 int steal_page(
@@ -4435,7 +4435,7 @@ int steal_page(
     const struct domain *owner = dom_xen;
 
     if ( paging_mode_external(d) )
-        return -1;
+        return -EOPNOTSUPP;
 
     spin_lock(&d->page_alloc_lock);
 
@@ -4490,7 +4490,7 @@ int steal_page(
              page_to_mfn(page), d->domain_id,
              owner ? owner->domain_id : DOMID_INVALID,
              page->count_info, page->u.inuse.type_info);
-    return -1;
+    return -EINVAL;
 }
 
 static int __do_update_va_mapping(
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1843,10 +1843,10 @@ gnttab_transfer(
         }
 
         page = mfn_to_page(mfn);
-        if ( steal_page(d, page, 0) < 0 )
+        if ( (rc = steal_page(d, page, 0)) < 0 )
        {
             put_gfn(d, gop.mfn);
-            gop.status = GNTST_bad_page;
+            gop.status = rc == -EINVAL ? GNTST_bad_page : GNTST_general_error;
             goto copyback;
         }
 
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -566,10 +566,10 @@ static long memory_exchange(XEN_GUEST_HA
 
             page = mfn_to_page(mfn);
 
-            if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
+            rc = steal_page(d, page, MEMF_no_refcount);
+            if ( unlikely(rc) )
             {
                 put_gfn(d, gmfn + k);
-                rc = -EINVAL;
                 goto fail;
             }
 
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -322,11 +322,6 @@ static inline int relinquish_shared_page
 
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
 
-int steal_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-int donate_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-
 #define domain_set_alloc_bitsize(d) ((void)0)
 #define domain_clamp_alloc_bitsize(d, b) (b)
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -550,11 +550,6 @@ long subarch_memory_op(unsigned long cmd
 int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-int steal_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-int donate_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-
 int map_ldt_shadow_page(unsigned int);
 
 #define NIL(type) ((type *)-sizeof(type))
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -567,8 +567,12 @@ int xenmem_add_to_physmap_one(struct dom
                               union xen_add_to_physmap_batch_extra extra,
                               unsigned long idx, gfn_t gfn);
 
-/* Returns 0 on success, or negative on error. */
+/* Return 0 on success, or negative on error. */
 int __must_check guest_remove_page(struct domain *d, unsigned long gmfn);
+int __must_check steal_page(struct domain *d, struct page_info *page,
+                            unsigned int memflags);
+int __must_check donate_page(struct domain *d, struct page_info *page,
+                             unsigned int memflags);
 
 #define RAM_TYPE_CONVENTIONAL 0x00000001
 #define RAM_TYPE_RESERVED     0x00000002
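
For reference, the caller-side convention this patch establishes (propagate the
-Exxx value from steal_page() instead of treating every failure alike, and
translate it into a grant status where needed) can be illustrated by the
standalone sketch below. This is not Xen code: struct page_info, the
paging_external flag, transfer_one() and the GNTST_* values are simplified
stand-ins for illustration only.

/*
 * Standalone sketch (builds with gcc): models the pattern introduced by the
 * patch -- distinct error codes from steal_page(), __must_check on the
 * declaration, and a caller that maps -EINVAL to a "bad page" status and
 * everything else to a generic error.
 */
#include <errno.h>
#include <stdio.h>

#define __must_check __attribute__((warn_unused_result))

/* Simplified stand-ins for the real grant-table status values. */
#define GNTST_okay           0
#define GNTST_general_error -1
#define GNTST_bad_page      -9

struct page_info { int dummy; };

/* Distinct error codes per failure mode, as the patched steal_page() does. */
static int __must_check steal_page(struct page_info *page, int paging_external)
{
    if ( paging_external )
        return -EOPNOTSUPP;   /* operation not applicable to this domain */
    if ( !page )
        return -EINVAL;       /* page not in a stealable state */
    return 0;
}

/* Caller translates the error, mirroring gnttab_transfer() after the patch. */
static int transfer_one(struct page_info *page, int paging_external)
{
    int rc = steal_page(page, paging_external);

    if ( rc < 0 )
        return rc == -EINVAL ? GNTST_bad_page : GNTST_general_error;

    return GNTST_okay;
}

int main(void)
{
    struct page_info page;

    printf("ok case:  %d\n", transfer_one(&page, 0));
    printf("bad page: %d\n", transfer_one(NULL, 0));
    printf("external: %d\n", transfer_one(&page, 1));
    return 0;
}

Because steal_page() carries __must_check (warn_unused_result), a caller that
ignores the return value, as the tmem path mentioned above effectively did,
now draws a compiler warning rather than silently discarding the error.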