* Bug in use of grant tables in blkback.c error path?
From: Harry Butterworth @ 2005-11-06 12:24 UTC
  To: keir.fraser, christopher.clark; +Cc: xen-devel

In dispatch_rw_block_io after a call to HYPERVISOR_grant_table_op, there
is the following code which calls fast_flush_area and breaks out of the
loop early if one of the handles returned from HYPERVISOR_grant_table_op
is negative:

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].handle < 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			fast_flush_area(pending_idx, nseg);
			goto bad_descriptor;
		}

		phys_to_machine_mapping[__pa(MMAP_VADDR(
			pending_idx, i)) >> PAGE_SHIFT] =
			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);

		pending_handle(pending_idx, i) = map[i].handle;
	}

The implementation of fast_flush_area uses pending_handle and is called
to flush the whole range of nseg pages, but the loop above sets up
pending_handle as it goes along, so the handles for the pages after the
erroneous one are not set up when fast_flush_area is called.

Here's the bit of fast_flush_area that uses pending_handle:

	for (i = 0; i < nr_pages; i++) {
		handle = pending_handle(idx, i); <<<<<<<<<<<<<<<<<
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap[i].host_addr      = MMAP_VADDR(idx, i);
		unmap[i].dev_bus_addr   = 0;
		unmap[i].handle         = handle;
		pending_handle(idx, i)  = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

I also checked the implementation of gnttab_map_grant_ref:

static long
gnttab_map_grant_ref(
    gnttab_map_grant_ref_t *uop, unsigned int count)
{
    int i;

    for ( i = 0; i < count; i++ )
        (void)__gnttab_map_grant_ref(&uop[i]);

    return 0;
}

gnttab_map_grant_ref seems to keep going and attempt to map the
remaining pages after an error is encountered.

All in all, this looks like a bug to me where failure to map a grant
reference in the middle of a set would result in pages kept mapped in
the backend when fast_flush_area fails to clean them up.

Am I right?
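
If I am, one possible fix (just a sketch, untested) might be to record
all of the handles up front, marking the failures invalid, so that
fast_flush_area sees an accurate picture of what is actually mapped:

	/* Sketch of a possible fix, untested: record every handle
	   first so fast_flush_area can unmap exactly the pages that
	   were successfully mapped. */
	for (i = 0; i < nseg; i++)
		pending_handle(pending_idx, i) = map[i].handle < 0 ?
			BLKBACK_INVALID_HANDLE : map[i].handle;

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].handle < 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			fast_flush_area(pending_idx, nseg);
			goto bad_descriptor;
		}

		phys_to_machine_mapping[__pa(MMAP_VADDR(
			pending_idx, i)) >> PAGE_SHIFT] =
			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
	}

The BLKBACK_INVALID_HANDLE check in fast_flush_area would then skip the
entries that failed and unmap the pages which were mapped successfully.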

Harry.

-- 
Harry Butterworth <harry@hebutterworth.freeserve.co.uk>


* Re: Bug in use of grant tables in blkback.c error path?
From: Keir Fraser @ 2005-11-06 15:32 UTC
  To: Harry Butterworth; +Cc: xen-devel


On 6 Nov 2005, at 12:24, Harry Butterworth wrote:

> All in all, this looks like a bug to me where failure to map a grant
> reference in the middle of a set would result in pages kept mapped in
> the backend when fast_flush_area fails to clean them up.
>
> Am I right?

Yes, it's a bug. We'll sort out a patch.

  -- Keir


* Re: Bug in use of grant tables in blkback.c error path?
From: Harry Butterworth @ 2005-11-06 16:18 UTC
  To: Keir Fraser; +Cc: xen-devel

On Sun, 2005-11-06 at 15:32 +0000, Keir Fraser wrote:
> On 6 Nov 2005, at 12:24, Harry Butterworth wrote:
> 
> > All in all, this looks like a bug to me where failure to map a grant
> > reference in the middle of a set would result in pages kept mapped in
> > the backend when fast_flush_area fails to clean them up.
> >
> > Am I right?
> 
> Yes, it's a bug. We'll sort out a patch.
> 

OK.  I ended up doing the following in my code.  I'm not particularly
confident about it, but hopefully it's correct.  As in blkback, the
phys_to_machine_mapping entries get left lying around after the unmap.
I assume this is not a problem.

typedef struct xenidc_grant_table_mapping_context_struct
  xenidc_grant_table_mapping_context;

struct xenidc_grant_table_mapping_context_struct
{
    xenidc_buffer_mappable_class    * class;
    xenidc_buffer_resource_provider * provider;
    unsigned long                     page_count;
    unsigned long                     mmap_vaddress;
    int16_t                           handle
      [ XENIDC_REMOTE_BUFFER_REFERENCE_GRANT_TABLE_REFERENCE_COUNT ];
};

static void xenidc_grant_table_unmap_rbr
  ( xenidc_buffer_mappable_class ** context_in );

static xenidc_buffer_mappable_class ** xenidc_grant_table_map_rbr
(
    xenidc_buffer_mappable_class     * class,
    xenidc_remote_buffer_reference   * rbr,
    xenidc_address                   * address,
    xenidc_buffer_resource_provider  * provider,
    void                            ** mapping
)
{
    trace();

    {
        xenidc_grant_table_mapping_context * context =
          xenidc_buffer_resource_provider_allocate_page( provider );

        struct gnttab_map_grant_ref map
         [ XENIDC_REMOTE_BUFFER_REFERENCE_GRANT_TABLE_REFERENCE_COUNT ];

        unsigned long first_page = rbr->byte_offset / PAGE_SIZE;
        unsigned long final_page =
          ( rbr->byte_offset + rbr->byte_count - 1 ) / PAGE_SIZE;

        context->class = class;

        context->provider = provider;

        context->page_count = final_page - first_page + 1;

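        /* Reserve a contiguous range of empty virtual address space
           into which the granted pages will be mapped. */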
        context->mmap_vaddress =
          xenidc_buffer_resource_provider_allocate_empty_page_range
            ( provider, context->page_count );

        {
            int i;

            for( i = 0; i < context->page_count; i++ )
            {
                map[ i ].host_addr =
                  context->mmap_vaddress + ( i * PAGE_SIZE );
                map[ i ].dom       =
                  xenidc_address_query_remote_domain_id( address );
                map[ i ].ref       =
                  rbr->base.grant_table.reference[ first_page + i ];
                map[ i ].flags     = GNTMAP_host_map;
            }
        }

        {
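            /* A nonzero return here would mean the batch itself was
               rejected; per-page mapping failures are reported through
               map[ i ].handle and are checked below. */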
            int error = HYPERVISOR_grant_table_op
              ( GNTTABOP_map_grant_ref, map, context->page_count );

            BUG_ON( error );
        }

        {
            int error = 0;

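            /* Record every handle, including the failed ones, so that
               the unmap path knows exactly which pages were mapped. */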
            {
                int i;

                for( i = 0; i < context->page_count; i++ )
                {
                    context->handle[ i ] = map[ i ].handle;

                    if( unlikely( map[ i ].handle < 0 ) )
                    {
                        error = 1;
                    }
                }
            }

            if( !error )
            {
                int i;

                for( i = 0; i < context->page_count; i++ )
                {
                    phys_to_machine_mapping
                      [ __pa( context->mmap_vaddress + ( i * PAGE_SIZE ) )
                        >> PAGE_SHIFT ] =
                      FOREIGN_FRAME( map[ i ].dev_bus_addr >> PAGE_SHIFT );
                }
            }
            else
            {
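                /* At least one page failed to map: unmap whatever did
                   get mapped and release the resources. */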
                xenidc_grant_table_unmap_rbr( &context->class );

                context = NULL;
            }
        }

        if( context != NULL )
        {
            *mapping =
              (void *)( context->mmap_vaddress +
                        ( rbr->byte_offset % PAGE_SIZE ) );

            return &context->class;
        }
        else
        {
            return NULL;
        }
    }
}

static void xenidc_grant_table_unmap_rbr
  ( xenidc_buffer_mappable_class ** context_in )
{
    trace();

    {
        xenidc_grant_table_mapping_context * context = container_of
          ( context_in, xenidc_grant_table_mapping_context, class );

        {
            struct gnttab_unmap_grant_ref unmap
              [ XENIDC_REMOTE_BUFFER_REFERENCE_GRANT_TABLE_REFERENCE_COUNT ];

            int i;
            int j;

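            /* Build the unmap batch from only the successfully mapped
               pages ( handle >= 0 ), compacting them into unmap[]. */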
            for( i = 0, j = 0; i < context->page_count; i++ )
            {
                if( context->handle[ i ] >= 0 )
                {
                    unmap[ j ].host_addr    =
                      context->mmap_vaddress + ( i * PAGE_SIZE );
                    unmap[ j ].dev_bus_addr = 0;
                    unmap[ j ].handle       = context->handle[ i ];

                    j++;
                }
            }

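            /* Only issue the hypercall if at least one page was
               actually mapped. */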
            if( j != 0 )
            {
                int error = HYPERVISOR_grant_table_op
                  ( GNTTABOP_unmap_grant_ref, unmap, j );

                BUG_ON( error );
            }
        }

        xenidc_buffer_resource_provider_free_empty_page_range
          ( context->provider, context->mmap_vaddress );

        xenidc_buffer_resource_provider_free_page
          ( context->provider, context );
    }
}



>   -- Keir
> 
> 
-- 
Harry Butterworth <harry@hebutterworth.freeserve.co.uk>
