All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
@ 2013-06-27 11:26 Ilia Mirkin
  2013-06-29 18:07 ` [Nouveau] " Emil Velikov
       [not found] ` <1372332404-16547-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
  0 siblings, 2 replies; 7+ messages in thread
From: Ilia Mirkin @ 2013-06-27 11:26 UTC (permalink / raw)
  To: Maarten Lankhorst
  Cc: mesa-dev-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Adds H.264 and MPEG2 codec support via VP2, using firmware from the
blob. Acceleration is supported at the bitstream level for H.264 and
IDCT level for MPEG2.

Known issues:
 - H.264 interlaced doesn't render properly
 - H.264 shows very occasional artifacts on a small fraction of videos
 - MPEG2 + VDPAU shows frequent but small artifacts, which aren't there
   when using XvMC on the same videos

Signed-off-by: Ilia Mirkin <imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
---

I did try to work out the known issues above, but so far to no avail.

The kernel support for these engines is not in mainline yet, but it's likely
going to appear in 3.11. I figured that there would be a bunch of feedback so
I might as well send it out early.

I played around a lot with XvMC performance; the mb function shows up as #1 in
the profiles. With the SSE4.2 optimizations I put in, it drops to #2.
Something clever can likely be done to improve VDPAU performance as well,
e.g. using SSE to do the inverse quantization operations, but I've left that
out. (It gets tricky because a lot of the data is 0's, so it's unclear whether
it's faster to use SSE to do operations on everything or one-at-a-time on the
non-0's like I have it now.) Even with these, XvMC ends up ~20% faster than
plain CPU decoding, and likely that percent improves on older CPUs that can't
decode MPEG2 quite as quickly. VDPAU provides further improvements (likely
because it is able to skip mb's while XvMC can't), but there are artifacts for
reasons unknown.

Note that in order to get XvMC to work, you need my previous patch (or
something similar) since otherwise libXvMCnouveau can't be dlopen'd:
http://lists.freedesktop.org/archives/mesa-dev/2013-June/040949.html

If you want to test it out, the kernel patch:
http://lists.freedesktop.org/archives/nouveau/2013-June/012821.html

Firmware:
https://github.com/imirkin/re-vp2/blob/master/extract_firmware.py

 src/gallium/drivers/nv50/Makefile.sources |   5 +-
 src/gallium/drivers/nv50/nv50_context.c   |  13 +-
 src/gallium/drivers/nv50/nv50_context.h   |  24 +
 src/gallium/drivers/nv50/nv50_miptree.c   |  27 ++
 src/gallium/drivers/nv50/nv50_resource.h  |   1 +
 src/gallium/drivers/nv50/nv50_screen.c    |  13 +-
 src/gallium/drivers/nv50/nv50_winsys.h    |   4 +
 src/gallium/drivers/nv50/nv84_video.c     | 778 ++++++++++++++++++++++++++++++
 src/gallium/drivers/nv50/nv84_video.h     | 134 +++++
 src/gallium/drivers/nv50/nv84_video_bsp.c | 251 ++++++++++
 src/gallium/drivers/nv50/nv84_video_vp.c  | 521 ++++++++++++++++++++
 11 files changed, 1768 insertions(+), 3 deletions(-)
 create mode 100644 src/gallium/drivers/nv50/nv84_video.c
 create mode 100644 src/gallium/drivers/nv50/nv84_video.h
 create mode 100644 src/gallium/drivers/nv50/nv84_video_bsp.c
 create mode 100644 src/gallium/drivers/nv50/nv84_video_vp.c

diff --git a/src/gallium/drivers/nv50/Makefile.sources b/src/gallium/drivers/nv50/Makefile.sources
index 1092570..0fdac51 100644
--- a/src/gallium/drivers/nv50/Makefile.sources
+++ b/src/gallium/drivers/nv50/Makefile.sources
@@ -13,7 +13,10 @@ C_SOURCES := \
 	nv50_program.c \
 	nv50_shader_state.c \
 	nv50_push.c \
-	nv50_query.c
+	nv50_query.c \
+	nv84_video.c \
+	nv84_video_bsp.c \
+	nv84_video_vp.c
 
 CODEGEN_NV50_SOURCES := \
 	codegen/nv50_ir.cpp \
diff --git a/src/gallium/drivers/nv50/nv50_context.c b/src/gallium/drivers/nv50/nv50_context.c
index 5781c4b..79a0473 100644
--- a/src/gallium/drivers/nv50/nv50_context.c
+++ b/src/gallium/drivers/nv50/nv50_context.c
@@ -258,7 +258,18 @@ nv50_create(struct pipe_screen *pscreen, void *priv)
    draw_set_rasterize_stage(nv50->draw, nv50_draw_render_stage(nv50));
 #endif
 
-   nouveau_context_init_vdec(&nv50->base);
+   if (screen->base.device->chipset < 0x84) {
+      /* PMPEG */
+      nouveau_context_init_vdec(&nv50->base);
+   } else if (screen->base.device->chipset < 0x98 ||
+              screen->base.device->chipset == 0xa0) {
+      /* VP2 */
+      pipe->create_video_decoder = nv84_create_decoder;
+      pipe->create_video_buffer = nv84_video_buffer_create;
+   } else {
+      /* Unsupported, but need to init pointers. */
+      nouveau_context_init_vdec(&nv50->base);
+   }
 
    flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;
 
diff --git a/src/gallium/drivers/nv50/nv50_context.h b/src/gallium/drivers/nv50/nv50_context.h
index 0a83131..b204cc8 100644
--- a/src/gallium/drivers/nv50/nv50_context.h
+++ b/src/gallium/drivers/nv50/nv50_context.h
@@ -289,4 +289,28 @@ void nv50_vertex_arrays_validate(struct nv50_context *nv50);
 /* nv50_push.c */
 void nv50_push_vbo(struct nv50_context *, const struct pipe_draw_info *);
 
+/* nv84_video.c */
+struct pipe_video_decoder *
+nv84_create_decoder(struct pipe_context *context,
+                    enum pipe_video_profile profile,
+                    enum pipe_video_entrypoint entrypoint,
+                    enum pipe_video_chroma_format chroma_format,
+                    unsigned width, unsigned height,
+                    unsigned max_references,
+                    bool expect_chunked_decode);
+
+struct pipe_video_buffer *
+nv84_video_buffer_create(struct pipe_context *pipe,
+                         const struct pipe_video_buffer *template);
+
+int
+nv84_screen_get_video_param(struct pipe_screen *pscreen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param);
+
+boolean
+nv84_screen_video_supported(struct pipe_screen *screen,
+                            enum pipe_format format,
+                            enum pipe_video_profile profile);
+
 #endif
diff --git a/src/gallium/drivers/nv50/nv50_miptree.c b/src/gallium/drivers/nv50/nv50_miptree.c
index 036f1c7..28be768 100644
--- a/src/gallium/drivers/nv50/nv50_miptree.c
+++ b/src/gallium/drivers/nv50/nv50_miptree.c
@@ -239,6 +239,28 @@ nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align)
 }
 
 static void
+nv50_miptree_init_layout_video(struct nv50_miptree *mt)
+{
+   const struct pipe_resource *pt = &mt->base.base;
+   const unsigned blocksize = util_format_get_blocksize(pt->format);
+
+   assert(pt->last_level == 0);
+   assert(mt->ms_x == 0 && mt->ms_y == 0);
+   assert(!util_format_is_compressed(pt->format));
+
+   mt->layout_3d = pt->target == PIPE_TEXTURE_3D;
+
+   mt->level[0].tile_mode = 0x20;
+   mt->level[0].pitch = align(pt->width0 * blocksize, 64);
+   mt->total_size = align(pt->height0, 16) * mt->level[0].pitch * (mt->layout_3d ? pt->depth0 : 1);
+
+   if (pt->array_size > 1) {
+      mt->layer_stride = align(mt->total_size, NV50_TILE_SIZE(0x20));
+      mt->total_size = mt->layer_stride * pt->array_size;
+   }
+}
+
+static void
 nv50_miptree_init_layout_tiled(struct nv50_miptree *mt)
 {
    struct pipe_resource *pt = &mt->base.base;
@@ -311,6 +333,11 @@ nv50_miptree_create(struct pipe_screen *pscreen,
       return NULL;
    }
 
+   if (unlikely(pt->flags & NV50_RESOURCE_FLAG_VIDEO)) {
+      nv50_miptree_init_layout_video(mt);
+      /* BO allocation done by client */
+      return pt;
+   } else
    if (bo_config.nv50.memtype != 0) {
       nv50_miptree_init_layout_tiled(mt);
    } else
diff --git a/src/gallium/drivers/nv50/nv50_resource.h b/src/gallium/drivers/nv50/nv50_resource.h
index 6b92463..c520a72 100644
--- a/src/gallium/drivers/nv50/nv50_resource.h
+++ b/src/gallium/drivers/nv50/nv50_resource.h
@@ -16,6 +16,7 @@ nv50_init_resource_functions(struct pipe_context *pcontext);
 void
 nv50_screen_init_resource_functions(struct pipe_screen *pscreen);
 
+#define NV50_RESOURCE_FLAG_VIDEO (NOUVEAU_RESOURCE_FLAG_DRV_PRIV << 0)
 
 #define NV50_TILE_SHIFT_X(m) 6
 #define NV50_TILE_SHIFT_Y(m) ((((m) >> 4) & 0xf) + 2)
diff --git a/src/gallium/drivers/nv50/nv50_screen.c b/src/gallium/drivers/nv50/nv50_screen.c
index b6da303..86bad6b 100644
--- a/src/gallium/drivers/nv50/nv50_screen.c
+++ b/src/gallium/drivers/nv50/nv50_screen.c
@@ -646,7 +646,18 @@ nv50_screen_create(struct nouveau_device *dev)
 
    nv50_screen_init_resource_functions(pscreen);
 
-   nouveau_screen_init_vdec(&screen->base);
+   if (screen->base.device->chipset < 0x84) {
+      /* PMPEG */
+      nouveau_screen_init_vdec(&screen->base);
+   } else if (screen->base.device->chipset < 0x98 ||
+              screen->base.device->chipset == 0xa0) {
+      /* VP2 */
+      screen->base.base.get_video_param = nv84_screen_get_video_param;
+      screen->base.base.is_video_format_supported = nv84_screen_video_supported;
+   } else {
+      /* Unsupported, but need to init pointers. */
+      nouveau_screen_init_vdec(&screen->base);
+   }
 
    ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                         NULL, &screen->fence.bo);
diff --git a/src/gallium/drivers/nv50/nv50_winsys.h b/src/gallium/drivers/nv50/nv50_winsys.h
index 145ee70..e04247b 100644
--- a/src/gallium/drivers/nv50/nv50_winsys.h
+++ b/src/gallium/drivers/nv50/nv50_winsys.h
@@ -60,6 +60,10 @@ PUSH_REFN(struct nouveau_pushbuf *push, struct nouveau_bo *bo, uint32_t flags)
 #define SUBC_COMPUTE(m) 6, (m)
 #define NV50_COMPUTE(n) SUBC_COMPUTE(NV50_COMPUTE_##n)
 
+/* These are expected to be on their own pushbufs */
+#define SUBC_BSP(m) 2, (m)
+#define SUBC_VP(m) 2, (m)
+
 
 static INLINE uint32_t
 NV50_FIFO_PKHDR(int subc, int mthd, unsigned size)
diff --git a/src/gallium/drivers/nv50/nv84_video.c b/src/gallium/drivers/nv50/nv84_video.c
new file mode 100644
index 0000000..064178c
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+#include "util/u_format.h"
+#include "util/u_sampler.h"
+#include "vl/vl_zscan.h"
+
+#include "nv84_video.h"
+
+static int
+nv84_copy_firmware(const char *path, void *dest, size_t len)
+{
+   int fd = open(path, O_RDONLY | O_CLOEXEC);
+   ssize_t r;
+   if (fd < 0) {
+      fprintf(stderr, "opening firmware file %s failed: %m\n", path);
+      return 1;
+   }
+   r = read(fd, dest, len);
+   close(fd);
+
+   if (r != len) {
+      fprintf(stderr, "reading firmware file %s failed: %m\n", path);
+      return 1;
+   }
+
+   return 0;
+}
+
+static int
+filesize(const char *path)
+{
+   int ret;
+   struct stat statbuf;
+
+   ret = stat(path, &statbuf);
+   if (ret)
+      return ret;
+   return statbuf.st_size;
+}
+
+static struct nouveau_bo *
+nv84_load_firmwares(struct nouveau_device *dev, struct nv84_decoder *dec,
+                    const char *fw1, const char *fw2)
+{
+   int ret, size1, size2 = 0;
+   struct nouveau_bo *fw;
+
+   size1 = filesize(fw1);
+   if (fw2)
+      size2 = filesize(fw2);
+   if (size1 < 0 || size2 < 0)
+      return NULL;
+
+   dec->vp_fw2_offset = align(size1, 0x100);
+
+   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, dec->vp_fw2_offset + size2, NULL, &fw);
+   if (ret) return NULL;
+   ret = nouveau_bo_map(fw, NOUVEAU_BO_WR, dec->client);
+   if (ret) goto error;
+
+   ret = nv84_copy_firmware(fw1, fw->map, size1);
+   if (fw2 && !ret)
+      ret = nv84_copy_firmware(fw2, fw->map + dec->vp_fw2_offset, size2);
+   munmap(fw->map, fw->size);
+   fw->map = NULL;
+   if (!ret) return fw;
+error:
+   nouveau_bo_ref(NULL, &fw);
+   return NULL;
+}
+
+static struct nouveau_bo *
+nv84_load_bsp_firmware(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec, "/lib/firmware/nouveau/nv84_bsp-h264", NULL);
+}
+
+static struct nouveau_bo *
+nv84_load_vp_firmware(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec,
+         "/lib/firmware/nouveau/nv84_vp-h264-1",
+         "/lib/firmware/nouveau/nv84_vp-h264-2");
+}
+
+static struct nouveau_bo *
+nv84_load_vp_firmware_mpeg(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec, "/lib/firmware/nouveau/nv84_vp-mpeg12", NULL);
+}
+
+static void
+nv84_decoder_decode_bitstream_h264(struct pipe_video_decoder *decoder,
+                                   struct pipe_video_buffer *video_target,
+                                   struct pipe_picture_desc *picture,
+                                   unsigned num_buffers,
+                                   const void *const *data,
+                                   const unsigned *num_bytes)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
+
+   struct pipe_h264_picture_desc *desc = (struct pipe_h264_picture_desc *)picture;
+
+   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
+
+   nv84_decoder_bsp(dec, desc, num_buffers, data, num_bytes, target);
+   nv84_decoder_vp_h264(dec, desc, target);
+}
+
+static void
+nv84_decoder_flush(struct pipe_video_decoder *decoder)
+{
+}
+
+static void
+nv84_decoder_begin_frame_h264(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+}
+
+static void
+nv84_decoder_end_frame_h264(struct pipe_video_decoder *decoder,
+                            struct pipe_video_buffer *target,
+                            struct pipe_picture_desc *picture)
+{
+}
+
+static void
+nv84_decoder_decode_bitstream_mpeg12(struct pipe_video_decoder *decoder,
+                                     struct pipe_video_buffer *video_target,
+                                     struct pipe_picture_desc *picture,
+                                     unsigned num_buffers,
+                                     const void *const *data,
+                                     const unsigned *num_bytes)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
+
+   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+
+   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
+
+   vl_mpg12_bs_decode(dec->mpeg12_bs,
+                      video_target,
+                      desc,
+                      num_buffers,
+                      data,
+                      num_bytes);
+}
+
+static void
+nv84_decoder_begin_frame_mpeg12(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+   int i;
+
+   nouveau_bo_wait(dec->mpeg12_bo, NOUVEAU_BO_RDWR, dec->client);
+   dec->mpeg12_mb_info = dec->mpeg12_bo->map + 0x100;
+   dec->mpeg12_data = dec->mpeg12_bo->map + 0x100 +
+      align(0x20 * mb(dec->base.width) * mb(dec->base.height), 0x100);
+   if (desc->intra_matrix) {
+      dec->zscan = desc->alternate_scan ? vl_zscan_alternate : vl_zscan_normal;
+      for (i = 0; i < 64; i++) {
+         dec->mpeg12_intra_matrix[i] = desc->intra_matrix[dec->zscan[i]];
+         dec->mpeg12_non_intra_matrix[i] = desc->non_intra_matrix[dec->zscan[i]];
+      }
+      dec->mpeg12_intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
+   }
+}
+
+static void
+nv84_decoder_end_frame_mpeg12(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+   nv84_decoder_vp_mpeg12(
+         (struct nv84_decoder *)decoder,
+         (struct pipe_mpeg12_picture_desc *)picture,
+         (struct nv84_video_buffer *)target);
+}
+
+static void
+nv84_decoder_decode_macroblock(struct pipe_video_decoder *decoder,
+                               struct pipe_video_buffer *target,
+                               struct pipe_picture_desc *picture,
+                               const struct pipe_macroblock *macroblocks,
+                               unsigned num_macroblocks)
+{
+   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
+   for (int i = 0; i < num_macroblocks; i++, mb++) {
+      nv84_decoder_vp_mpeg12_mb(
+            (struct nv84_decoder *)decoder,
+            (struct pipe_mpeg12_picture_desc *)picture,
+            mb);
+   }
+}
+
+static void
+nv84_decoder_destroy(struct pipe_video_decoder *decoder)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+
+   nouveau_bo_ref(NULL, &dec->bsp_fw);
+   nouveau_bo_ref(NULL, &dec->bsp_data);
+   nouveau_bo_ref(NULL, &dec->vp_fw);
+   nouveau_bo_ref(NULL, &dec->vp_data);
+   nouveau_bo_ref(NULL, &dec->mbring);
+   nouveau_bo_ref(NULL, &dec->vpring);
+   nouveau_bo_ref(NULL, &dec->bitstream);
+   nouveau_bo_ref(NULL, &dec->vp_params);
+   nouveau_bo_ref(NULL, &dec->fence);
+
+   nouveau_object_del(&dec->bsp);
+   nouveau_object_del(&dec->vp);
+
+   nouveau_bufctx_del(&dec->bsp_bufctx);
+   nouveau_pushbuf_del(&dec->bsp_pushbuf);
+   nouveau_object_del(&dec->bsp_channel);
+
+   nouveau_bufctx_del(&dec->vp_bufctx);
+   nouveau_pushbuf_del(&dec->vp_pushbuf);
+   nouveau_object_del(&dec->vp_channel);
+
+   nouveau_client_del(&dec->client);
+
+   if (dec->mpeg12_bs)
+      FREE(dec->mpeg12_bs);
+   FREE(dec);
+}
+
+struct pipe_video_decoder *
+nv84_create_decoder(struct pipe_context *context,
+                    enum pipe_video_profile profile,
+                    enum pipe_video_entrypoint entrypoint,
+                    enum pipe_video_chroma_format chroma_format,
+                    unsigned width, unsigned height,
+                    unsigned max_references,
+                    bool chunked_decode)
+{
+   struct nv50_context *nv50 = (struct nv50_context *)context;
+   struct nouveau_screen *screen = &nv50->screen->base;
+   struct nv84_decoder *dec;
+   struct nouveau_pushbuf *bsp_push, *vp_push;
+   struct nv50_surface surf;
+   struct nv50_miptree mip;
+   union pipe_color_union color;
+   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
+   int ret, i;
+   int is_h264 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC;
+   int is_mpeg12 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
+   struct nouveau_pushbuf_refn fence_ref[] = {
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+
+
+   if (getenv("XVMC_VL"))
+      return vl_create_decoder(context, profile, entrypoint,
+                               chroma_format, width, height,
+                               max_references, chunked_decode);
+
+   if ((is_h264 && entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
+       (is_mpeg12 && entrypoint > PIPE_VIDEO_ENTRYPOINT_IDCT)) {
+      debug_printf("%x\n", entrypoint);
+      return NULL;
+   }
+
+   if (!is_h264 && !is_mpeg12) {
+      debug_printf("invalid profile: %x\n", profile);
+      return NULL;
+   }
+
+   dec = CALLOC_STRUCT(nv84_decoder);
+   if (!dec) return NULL;
+
+   dec->base.context = context;
+   dec->base.profile = profile;
+   dec->base.entrypoint = entrypoint;
+   dec->base.chroma_format = chroma_format;
+   dec->base.width = width;
+   dec->base.height = height;
+   dec->base.max_references = max_references;
+   dec->base.destroy = nv84_decoder_destroy;
+   dec->base.flush = nv84_decoder_flush;
+   if (is_h264) {
+      dec->base.decode_bitstream = nv84_decoder_decode_bitstream_h264;
+      dec->base.begin_frame = nv84_decoder_begin_frame_h264;
+      dec->base.end_frame = nv84_decoder_end_frame_h264;
+
+      dec->frame_mbs = mb(dec->base.width) * mb_half(dec->base.height) * 2;
+      dec->frame_size = dec->frame_mbs << 8;
+      dec->vpring_deblock = align(0x30 * dec->frame_mbs, 0x100);
+      dec->vpring_residual = 0x2000 + MAX2(0x32000, 0x600 * dec->frame_mbs);
+      dec->vpring_ctrl = MAX2(0x10000, align(0x1080 + 0x144 * dec->frame_mbs, 0x100));
+   } else if (is_mpeg12) {
+      dec->base.decode_macroblock = nv84_decoder_decode_macroblock;
+      dec->base.begin_frame = nv84_decoder_begin_frame_mpeg12;
+      dec->base.end_frame = nv84_decoder_end_frame_mpeg12;
+
+      if (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         dec->mpeg12_bs = CALLOC_STRUCT(vl_mpg12_bs);
+         if (!dec->mpeg12_bs)
+            goto fail;
+         vl_mpg12_bs_init(dec->mpeg12_bs, &dec->base);
+         dec->base.decode_bitstream = nv84_decoder_decode_bitstream_mpeg12;
+      }
+   } else {
+      goto fail;
+   }
+
+   ret = nouveau_client_new(screen->device, &dec->client);
+   if (ret)
+      goto fail;
+
+   if (is_h264) {
+      ret = nouveau_object_new(&screen->device->object, 0,
+                               NOUVEAU_FIFO_CHANNEL_CLASS,
+                               &nv04_data, sizeof(nv04_data), &dec->bsp_channel);
+      if (ret)
+         goto fail;
+
+      ret = nouveau_pushbuf_new(dec->client, dec->bsp_channel, 4,
+                                32 * 1024, true, &dec->bsp_pushbuf);
+      if (ret)
+         goto fail;
+
+      if (nouveau_bufctx_new(dec->client, 1, &dec->bsp_bufctx))
+         goto fail;
+   }
+
+   ret = nouveau_object_new(&screen->device->object, 0,
+                            NOUVEAU_FIFO_CHANNEL_CLASS,
+                            &nv04_data, sizeof(nv04_data), &dec->vp_channel);
+   if (ret)
+      goto fail;
+   ret = nouveau_pushbuf_new(dec->client, dec->vp_channel, 4,
+                             32 * 1024, true, &dec->vp_pushbuf);
+   if (ret)
+      goto fail;
+
+   if (nouveau_bufctx_new(dec->client, 1, &dec->vp_bufctx))
+      goto fail;
+
+   bsp_push = dec->bsp_pushbuf;
+   vp_push = dec->vp_pushbuf;
+
+   if (is_h264) {
+      dec->bsp_fw = nv84_load_bsp_firmware(screen->device, dec);
+      dec->vp_fw = nv84_load_vp_firmware(screen->device, dec);
+      if (!dec->bsp_fw || !dec->vp_fw)
+         goto fail;
+   }
+   if (is_mpeg12) {
+      dec->vp_fw = nv84_load_vp_firmware_mpeg(screen->device, dec);
+      if (!dec->vp_fw)
+         goto fail;
+   }
+
+   if (is_h264) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0, 0x40000, NULL, &dec->bsp_data);
+      if (ret)
+         goto fail;
+   }
+   ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                        0, 0x40000, NULL, &dec->vp_data);
+   if (ret)
+      goto fail;
+   if (is_h264) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0,
+                           2 * (dec->vpring_deblock +
+                                dec->vpring_residual +
+                                dec->vpring_ctrl +
+                                0x1000),
+                           NULL, &dec->vpring);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0,
+                           (max_references + 1) * dec->frame_mbs * 0x40 +
+                           dec->frame_size + 0x2000,
+                           NULL, &dec->mbring);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0, 2 * (0x700 + MAX2(0x40000, 0x800 + 0x180 * dec->frame_mbs)),
+                           NULL, &dec->bitstream);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->bitstream, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0, 0x2000, NULL, &dec->vp_params);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->vp_params, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+   }
+   if (is_mpeg12) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0,
+                           align(0x20 * mb(width) * mb(height), 0x100) +
+                           (6 * 64 * 8) * mb(width) * mb(height) + 0x100,
+                           NULL, &dec->mpeg12_bo);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->mpeg12_bo, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+   }
+
+   ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM,
+                        0, 0x1000, NULL, &dec->fence);
+   if (ret)
+      goto fail;
+   ret = nouveau_bo_map(dec->fence, NOUVEAU_BO_WR, dec->client);
+   if (ret)
+      goto fail;
+   *(uint32_t *)dec->fence->map = 0;
+
+   if (is_h264) {
+      nouveau_pushbuf_bufctx(bsp_push, dec->bsp_bufctx);
+      nouveau_bufctx_refn(dec->bsp_bufctx, 0, dec->bsp_fw, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+      nouveau_bufctx_refn(dec->bsp_bufctx, 0, dec->bsp_data, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+   }
+
+   nouveau_pushbuf_bufctx(vp_push, dec->vp_bufctx);
+   nouveau_bufctx_refn(dec->vp_bufctx, 0, dec->vp_fw, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+   nouveau_bufctx_refn(dec->vp_bufctx, 0, dec->vp_data, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+
+   if (is_h264 && !ret)
+      ret = nouveau_object_new(dec->bsp_channel, 0xbeef74b0, 0x74b0, NULL, 0, &dec->bsp);
+
+   if (!ret)
+      ret = nouveau_object_new(dec->vp_channel, 0xbeef7476, 0x7476, NULL, 0, &dec->vp);
+
+   if (ret)
+      goto fail;
+
+
+   if (is_h264) {
+      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
+       * of doing this... perhaps makes sense to just copy the relevant logic
+       * here. */
+      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
+      surf.offset = dec->frame_size;
+      surf.width = 64;
+      surf.height = (max_references + 1) * dec->frame_mbs / 4;
+      surf.depth = 1;
+      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
+      surf.base.u.tex.level = 0;
+      surf.base.texture = &mip.base.base;
+      mip.level[0].tile_mode = 0;
+      mip.level[0].pitch = surf.width * 4;
+      mip.base.domain = NOUVEAU_BO_VRAM;
+      mip.base.bo = dec->mbring;
+      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 64, 4760);
+      surf.offset = dec->vpring->size / 2 - 0x1000;
+      surf.width = 1024;
+      surf.height = 1;
+      mip.level[0].pitch = surf.width * 4;
+      mip.base.bo = dec->vpring;
+      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
+      surf.offset = dec->vpring->size - 0x1000;
+      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
+
+      PUSH_SPACE(screen->pushbuf, 5);
+      fence_ref[0].bo = dec->fence;
+      nouveau_pushbuf_refn(screen->pushbuf, fence_ref, 1);
+      /* The clear_render_target is done via 3D engine, so use it to write to a
+       * semaphore to indicate that it's done.
+       */
+      BEGIN_NV04(screen->pushbuf, SUBC_3D(0x1b00), 4);
+      PUSH_DATAh(screen->pushbuf, dec->fence->offset);
+      PUSH_DATA (screen->pushbuf, dec->fence->offset);
+      PUSH_DATA (screen->pushbuf, 1);
+      PUSH_DATA (screen->pushbuf, 0xf010);
+      PUSH_KICK (screen->pushbuf);
+
+      PUSH_SPACE(bsp_push, 2 + 12 + 2 + 4 + 3);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(NV01_SUBCHAN_OBJECT), 1);
+      PUSH_DATA (bsp_push, dec->bsp->handle);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x180), 11);
+      for (i = 0; i < 11; i++)
+         PUSH_DATA(bsp_push, nv04_data.vram);
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x1b8), 1);
+      PUSH_DATA (bsp_push, nv04_data.vram);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x600), 3);
+      PUSH_DATAh(bsp_push, dec->bsp_fw->offset);
+      PUSH_DATA (bsp_push, dec->bsp_fw->offset);
+      PUSH_DATA (bsp_push, dec->bsp_fw->size);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x628), 2);
+      PUSH_DATA (bsp_push, dec->bsp_data->offset >> 8);
+      PUSH_DATA (bsp_push, dec->bsp_data->size);
+      PUSH_KICK (bsp_push);
+   }
+
+   PUSH_SPACE(vp_push, 2 + 12 + 2 + 4 + 3);
+
+   BEGIN_NV04(vp_push, SUBC_VP(NV01_SUBCHAN_OBJECT), 1);
+   PUSH_DATA (vp_push, dec->vp->handle);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x180), 11);
+   for (i = 0; i < 11; i++)
+      PUSH_DATA(vp_push, nv04_data.vram);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x1b8), 1);
+   PUSH_DATA (vp_push, nv04_data.vram);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x600), 3);
+   PUSH_DATAh(vp_push, dec->vp_fw->offset);
+   PUSH_DATA (vp_push, dec->vp_fw->offset);
+   PUSH_DATA (vp_push, dec->vp_fw->size);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x628), 2);
+   PUSH_DATA (vp_push, dec->vp_data->offset >> 8);
+   PUSH_DATA (vp_push, dec->vp_data->size);
+   PUSH_KICK (vp_push);
+
+   return &dec->base;
+fail:
+   nv84_decoder_destroy(&dec->base);
+   return NULL;
+}
+
+static struct pipe_sampler_view **
+nv84_video_buffer_sampler_view_planes(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   return buf->sampler_view_planes;
+}
+
+static struct pipe_sampler_view **
+nv84_video_buffer_sampler_view_components(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   return buf->sampler_view_components;
+}
+
+static struct pipe_surface **
+nv84_video_buffer_surfaces(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   return buf->surfaces;
+}
+
+static void
+nv84_video_buffer_destroy(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   unsigned i;
+
+   assert(buf);
+
+   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
+      pipe_resource_reference(&buf->resources[i], NULL);
+      pipe_sampler_view_reference(&buf->sampler_view_planes[i], NULL);
+      pipe_sampler_view_reference(&buf->sampler_view_components[i], NULL);
+      pipe_surface_reference(&buf->surfaces[i * 2], NULL);
+      pipe_surface_reference(&buf->surfaces[i * 2 + 1], NULL);
+   }
+
+   nouveau_bo_ref(NULL, &buf->interlaced);
+   nouveau_bo_ref(NULL, &buf->full);
+
+   FREE(buffer);
+}
+
+/* Create an NV12 video buffer laid out for the VP2 decoder: two
+ * two-layer array textures (R8 luma, R8G8 chroma; one layer per field)
+ * that share a single tiled VRAM allocation ("interlaced"), plus a second
+ * equally-sized BO ("full") used for reference frame data.  Falls back to
+ * the generic vl implementation when XVMC_VL is set in the environment.
+ * Returns NULL on failure (unsupported template or allocation error). */
+struct pipe_video_buffer *
+nv84_video_buffer_create(struct pipe_context *pipe,
+                         const struct pipe_video_buffer *template)
+{
+   struct nv84_video_buffer *buffer;
+   struct pipe_resource templ;
+   unsigned i, j, component;
+   struct pipe_sampler_view sv_templ;
+   struct pipe_surface surf_templ;
+   struct nv50_miptree *mt0, *mt1;
+   struct nouveau_bo *empty = NULL;
+   struct nouveau_screen *screen = &((struct nv50_context *)pipe)->screen->base;
+   union nouveau_bo_config cfg;
+   unsigned bo_size;
+
+   /* Escape hatch: force the generic shader-based video buffer path. */
+   if (getenv("XVMC_VL"))
+      return vl_video_buffer_create(pipe, template);
+
+   /* Only interlaced NV12 4:2:0 buffers are accepted. */
+   if (!template->interlaced) {
+      debug_printf("Require interlaced video buffers\n");
+      return NULL;
+   }
+   if (template->buffer_format != PIPE_FORMAT_NV12) {
+      debug_printf("Must use NV12 format\n");
+      return NULL;
+   }
+   if (template->chroma_format != PIPE_VIDEO_CHROMA_FORMAT_420) {
+      debug_printf("Must use 4:2:0 format\n");
+      return NULL;
+   }
+
+   buffer = CALLOC_STRUCT(nv84_video_buffer);
+   if (!buffer) return NULL;
+
+   /* No hardware motion-vector index assigned yet; see nv84_decoder_bsp. */
+   buffer->mvidx = -1;
+
+   buffer->base.buffer_format = template->buffer_format;
+   buffer->base.context = pipe;
+   buffer->base.destroy = nv84_video_buffer_destroy;
+   buffer->base.chroma_format = template->chroma_format;
+   buffer->base.width = template->width;
+   buffer->base.height = template->height;
+   buffer->base.get_sampler_view_planes = nv84_video_buffer_sampler_view_planes;
+   buffer->base.get_sampler_view_components = nv84_video_buffer_sampler_view_components;
+   buffer->base.get_surfaces = nv84_video_buffer_surfaces;
+   buffer->base.interlaced = true;
+
+   /* Each plane is a two-layer array texture (one layer per field), at
+    * half the (aligned) frame height per layer. */
+   memset(&templ, 0, sizeof(templ));
+   templ.target = PIPE_TEXTURE_2D_ARRAY;
+   templ.depth0 = 1;
+   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+   templ.format = PIPE_FORMAT_R8_UNORM;
+   templ.width0 = align(template->width, 2);
+   templ.height0 = align(template->height, 4) / 2;
+   templ.flags = NV50_RESOURCE_FLAG_VIDEO;
+   templ.array_size = 2;
+
+   /* Tiling mode / memory type used for the shared video BO below. */
+   cfg.nv50.tile_mode = 0x20;
+   cfg.nv50.memtype = 0x70;
+
+   buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
+   if (!buffer->resources[0])
+      goto error;
+
+   /* Chroma plane: interleaved Cb/Cr at half resolution in each axis. */
+   templ.format = PIPE_FORMAT_R8G8_UNORM;
+   templ.width0 /= 2;
+   templ.height0 /= 2;
+   buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
+   if (!buffer->resources[1])
+      goto error;
+
+   mt0 = nv50_miptree(buffer->resources[0]);
+   mt1 = nv50_miptree(buffer->resources[1]);
+
+   /* Both planes share one VRAM allocation: luma first, then chroma. */
+   bo_size = mt0->total_size + mt1->total_size;
+   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
+                      bo_size, &cfg, &buffer->interlaced))
+      goto error;
+   /* XXX Change reference frame management so that this is only allocated in
+    * the decoder when necessary. */
+   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
+                      bo_size, &cfg, &buffer->full))
+      goto error;
+
+   /* Repoint both miptrees at the shared BO.  NOTE(review): both
+    * nouveau_bo_ref calls below reuse the same 'empty' holder -- confirm
+    * the resulting reference count matches what the two miptrees release
+    * on destruction. */
+   mt0->base.bo = buffer->interlaced;
+   mt0->base.domain = NOUVEAU_BO_VRAM;
+   mt0->base.offset = 0;
+   mt0->base.address = buffer->interlaced->offset;
+   nouveau_bo_ref(buffer->interlaced, &empty);
+
+   mt1->base.bo = buffer->interlaced;
+   mt1->base.domain = NOUVEAU_BO_VRAM;
+   mt1->base.offset = mt0->layer_stride * 2;
+   mt1->base.address = buffer->interlaced->offset + mt0->layer_stride * 2;
+   nouveau_bo_ref(buffer->interlaced, &empty);
+
+   /* One sampler view per plane, plus one single-component view per
+    * channel (1 for luma, 2 for the chroma plane). */
+   memset(&sv_templ, 0, sizeof(sv_templ));
+   for (component = 0, i = 0; i < 2; ++i ) {
+      struct pipe_resource *res = buffer->resources[i];
+      unsigned nr_components = util_format_get_nr_components(res->format);
+
+      u_sampler_view_default_template(&sv_templ, res, res->format);
+      buffer->sampler_view_planes[i] = pipe->create_sampler_view(pipe, res, &sv_templ);
+      if (!buffer->sampler_view_planes[i])
+         goto error;
+
+      for (j = 0; j < nr_components; ++j, ++component) {
+         sv_templ.swizzle_r = sv_templ.swizzle_g = sv_templ.swizzle_b = PIPE_SWIZZLE_RED + j;
+         sv_templ.swizzle_a = PIPE_SWIZZLE_ONE;
+
+         buffer->sampler_view_components[component] = pipe->create_sampler_view(pipe, res, &sv_templ);
+         if (!buffer->sampler_view_components[component])
+            goto error;
+      }
+   }
+
+   /* Two render surfaces per plane, one per array layer (field). */
+   memset(&surf_templ, 0, sizeof(surf_templ));
+   for (j = 0; j < 2; ++j) {
+      surf_templ.format = buffer->resources[j]->format;
+      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 0;
+      buffer->surfaces[j * 2] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
+      if (!buffer->surfaces[j * 2])
+         goto error;
+
+      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 1;
+      buffer->surfaces[j * 2 + 1] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
+      if (!buffer->surfaces[j * 2 + 1])
+         goto error;
+   }
+
+   return &buffer->base;
+
+error:
+   /* destroy() tolerates the partially-initialized (CALLOC'd) buffer. */
+   nv84_video_buffer_destroy(&buffer->base);
+   return NULL;
+}
+
+/* Report VP2 decoding capabilities: H.264 (MPEG4 AVC) and MPEG1/2 only,
+ * up to 2048x2048, NV12 preferred, interlaced buffers required (the
+ * progressive cap is explicitly false). */
+int
+nv84_screen_get_video_param(struct pipe_screen *pscreen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param)
+{
+   switch (param) {
+   case PIPE_VIDEO_CAP_SUPPORTED:
+      return u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC ||
+         u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
+   case PIPE_VIDEO_CAP_NPOT_TEXTURES:
+      return 1;
+   case PIPE_VIDEO_CAP_MAX_WIDTH:
+   case PIPE_VIDEO_CAP_MAX_HEIGHT:
+      return 2048;
+   case PIPE_VIDEO_CAP_PREFERED_FORMAT:
+      return PIPE_FORMAT_NV12;
+   case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
+   case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
+      return true;
+   case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
+      return false;
+   default:
+      debug_printf("unknown video param: %d\n", param);
+      return 0;
+   }
+}
+
+/* Only the NV12 buffer format is supported, regardless of profile. */
+boolean
+nv84_screen_video_supported(struct pipe_screen *screen,
+                            enum pipe_format format,
+                            enum pipe_video_profile profile)
+{
+   return format == PIPE_FORMAT_NV12;
+}
diff --git a/src/gallium/drivers/nv50/nv84_video.h b/src/gallium/drivers/nv50/nv84_video.h
new file mode 100644
index 0000000..4ff8cf3
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NV84_VIDEO_H_
+#define NV84_VIDEO_H_
+
+#include "vl/vl_decoder.h"
+#include "vl/vl_video_buffer.h"
+#include "vl/vl_types.h"
+
+#include "vl/vl_mpeg12_bitstream.h"
+
+#include "util/u_video.h"
+
+#include "nv50_context.h"
+
+/* Convenience union over the codec-specific picture descriptors that are
+ * passed around through the generic pipe_picture_desc pointer. */
+union pipe_desc {
+   struct pipe_picture_desc *base;
+   struct pipe_mpeg12_picture_desc *mpeg12;
+   struct pipe_mpeg4_picture_desc *mpeg4;
+   struct pipe_vc1_picture_desc *vc1;
+   struct pipe_h264_picture_desc *h264;
+};
+
+/* One decoded frame.  resources[0]/[1] (R8 luma, R8G8 chroma) alias the
+ * shared "interlaced" BO; "full" is a second equally-sized BO used for
+ * reference frame data by the VP pass.  mvidx is the hardware
+ * motion-vector index (-1 until assigned in nv84_decoder_bsp);
+ * frame_num/frame_num_max track H.264 reference frame numbering relative
+ * to the last IDR. */
+struct nv84_video_buffer {
+   struct pipe_video_buffer base;
+   struct pipe_resource *resources[VL_NUM_COMPONENTS];
+   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
+   struct pipe_sampler_view *sampler_view_components[VL_NUM_COMPONENTS];
+   struct pipe_surface *surfaces[VL_NUM_COMPONENTS * 2];
+
+   struct nouveau_bo *interlaced, *full;
+   int mvidx;
+   unsigned frame_num, frame_num_max;
+};
+
+/* Per-decoder state: separate channels/pushbufs for the BSP (bitstream)
+ * and VP (video processor) engines, their firmware/data BOs, the shared
+ * rings, and CPU-side staging for the MPEG1/2 path. */
+struct nv84_decoder {
+   struct pipe_video_decoder base;
+   struct nouveau_client *client;
+   struct nouveau_object *bsp_channel, *vp_channel, *bsp, *vp;
+   struct nouveau_pushbuf *bsp_pushbuf, *vp_pushbuf;
+   struct nouveau_bufctx *bsp_bufctx, *vp_bufctx;
+
+   struct nouveau_bo *bsp_fw, *bsp_data;
+   struct nouveau_bo *vp_fw, *vp_data;
+   struct nouveau_bo *mbring, *vpring;
+
+   /*
+    * Semaphore used to order BSP and VP work; states:
+    *  0: init
+    *  1: vpring/mbring cleared, bsp is ready
+    *  2: bsp is done, vp is ready
+    * and then vp sets it back to 1
+    */
+   struct nouveau_bo *fence;
+
+   struct nouveau_bo *bitstream;
+   struct nouveau_bo *vp_params;
+
+   /* Offset of the second-stage VP firmware within its BO. */
+   size_t vp_fw2_offset;
+
+   unsigned frame_mbs, frame_size;
+   /* VPRING layout:
+        RESIDUAL
+        CTRL
+        DEBLOCK
+        0x1000
+   */
+   unsigned vpring_deblock, vpring_residual, vpring_ctrl;
+
+
+   /* MPEG1/2 path: bitstream parser plus CPU-side macroblock staging. */
+   struct vl_mpg12_bs *mpeg12_bs;
+
+   struct nouveau_bo *mpeg12_bo;
+   void *mpeg12_mb_info;
+   uint16_t *mpeg12_data;
+   const int *zscan;
+   uint8_t mpeg12_intra_matrix[64];
+   uint8_t mpeg12_non_intra_matrix[64];
+};
+
+/* Pixels -> count of 16-pixel macroblocks, rounded up. */
+static INLINE uint32_t mb(uint32_t coord)
+{
+   return (coord + 0xf)>>4;
+}
+
+/* Pixels -> count of 32-pixel units (macroblock pairs), rounded up. */
+static INLINE uint32_t mb_half(uint32_t coord)
+{
+   return (coord + 0x1f)>>5;
+}
+
+/* Submit one frame's H.264 slice data to the BSP engine. */
+int
+nv84_decoder_bsp(struct nv84_decoder *dec,
+                 struct pipe_h264_picture_desc *desc,
+                 unsigned num_buffers,
+                 const void *const *data,
+                 const unsigned *num_bytes,
+                 struct nv84_video_buffer *dest);
+
+/* Run the VP engine over the BSP output to produce the decoded frame. */
+void
+nv84_decoder_vp_h264(struct nv84_decoder *dec,
+                     struct pipe_h264_picture_desc *desc,
+                     struct nv84_video_buffer *dest);
+
+/* Stage one MPEG1/2 macroblock (CPU-side; includes inverse quantization
+ * for the bitstream entrypoint). */
+void
+nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
+                          struct pipe_mpeg12_picture_desc *desc,
+                          const struct pipe_mpeg12_macroblock *mb);
+
+/* Submit the staged MPEG1/2 macroblocks for the frame to the VP engine. */
+void
+nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
+                       struct pipe_mpeg12_picture_desc *desc,
+                       struct nv84_video_buffer *dest);
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv84_video_bsp.c b/src/gallium/drivers/nv50/nv84_video_bsp.c
new file mode 100644
index 0000000..7885210
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video_bsp.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv84_video.h"
+
+/* Parameter block the BSP firmware reads from the start of the bitstream
+ * buffer.  Field offsets within the struct are noted as hex comments;
+ * the total size must be exactly 0x530 bytes (see the STATIC_ASSERT in
+ * nv84_decoder_bsp).  u-prefixed fields have unknown meaning. */
+struct iparm {
+   struct iseqparm {
+      uint32_t chroma_format_idc; // 00
+      uint32_t pad[(0x128 - 0x4) / 4];
+      uint32_t log2_max_frame_num_minus4; // 128
+      uint32_t pic_order_cnt_type; // 12c
+      uint32_t log2_max_pic_order_cnt_lsb_minus4; // 130
+      uint32_t delta_pic_order_always_zero_flag; // 134
+      uint32_t num_ref_frames; // 138
+      uint32_t pic_width_in_mbs_minus1; // 13c
+      uint32_t pic_height_in_map_units_minus1; // 140
+      uint32_t frame_mbs_only_flag; // 144
+      uint32_t mb_adaptive_frame_field_flag; // 148
+      uint32_t direct_8x8_inference_flag; // 14c
+   } iseqparm; // 000
+   struct ipicparm {
+      uint32_t entropy_coding_mode_flag; // 00
+      uint32_t pic_order_present_flag; // 04
+      uint32_t num_slice_groups_minus1; // 08
+      uint32_t slice_group_map_type; // 0c
+      uint32_t pad1[0x60 / 4];
+      uint32_t u70; // 70
+      uint32_t u74; // 74
+      uint32_t u78; // 78
+      uint32_t num_ref_idx_l0_active_minus1; // 7c
+      uint32_t num_ref_idx_l1_active_minus1; // 80
+      uint32_t weighted_pred_flag; // 84
+      uint32_t weighted_bipred_idc; // 88
+      uint32_t pic_init_qp_minus26; // 8c
+      uint32_t chroma_qp_index_offset; // 90
+      uint32_t deblocking_filter_control_present_flag; // 94
+      uint32_t constrained_intra_pred_flag; // 98
+      uint32_t redundant_pic_cnt_present_flag; // 9c
+      uint32_t transform_8x8_mode_flag; // a0
+      uint32_t pad2[(0x1c8 - 0xa0 - 4) / 4];
+      uint32_t second_chroma_qp_index_offset; // 1c8
+      uint32_t u1cc; // 1cc
+      uint32_t curr_pic_order_cnt; // 1d0
+      uint32_t field_order_cnt[2]; // 1d4
+      uint32_t curr_mvidx; // 1dc
+      struct iref {
+         uint32_t u00; // 00
+         uint32_t field_is_ref; // 04 // bit0: top, bit1: bottom
+         uint8_t is_long_term; // 08
+         uint8_t non_existing; // 09
+         uint32_t frame_idx; // 0c
+         uint32_t field_order_cnt[2]; // 10
+         uint32_t mvidx; // 18
+         uint8_t field_pic_flag; // 1c
+         // 20
+      } refs[0x10]; // 1e0
+   } ipicparm; // 150
+};
+
+/* Feed one frame's worth of H.264 slice data to the BSP engine.  Fills
+ * the firmware parameter block (struct iparm), copies the slice NALs into
+ * the bitstream BO at offset 0x700, and kicks the engine.  Completion is
+ * signalled by the fence going 1 -> 2, which nv84_decoder_vp_h264 waits
+ * on before running the VP pass.  Always returns 0. */
+int
+nv84_decoder_bsp(struct nv84_decoder *dec,
+                 struct pipe_h264_picture_desc *desc,
+                 unsigned num_buffers,
+                 const void *const *data,
+                 const unsigned *num_bytes,
+                 struct nv84_video_buffer *dest)
+{
+   struct iparm params;
+   uint32_t more_params[0x44 / 4] = {0};
+   unsigned total_bytes = 0;
+   int i;
+   /* Marker words appended after the last slice. */
+   static const uint32_t end[] = {0x0b010000, 0, 0x0b010000, 0};
+   /* Tracks which motion-vector indices the reference frames occupy, so a
+    * free one can be picked for the destination below. */
+   char indexes[17] = {0};
+   struct nouveau_pushbuf *push = dec->bsp_pushbuf;
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dec->vpring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mbring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->bitstream, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+      { dec->fence, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+
+   /* Make sure the previous frame's BSP/VP round trip has finished before
+    * overwriting the shared buffers. */
+   nouveau_bo_wait(dec->fence, NOUVEAU_BO_RDWR, dec->client);
+
+   STATIC_ASSERT(sizeof(struct iparm) == 0x530);
+
+   memset(&params, 0, sizeof(params));
+
+   dest->frame_num = dest->frame_num_max = desc->frame_num;
+
+   for (i = 0; i < 16; i++) {
+      struct iref *ref = &params.ipicparm.refs[i];
+      struct nv84_video_buffer *frame = (struct nv84_video_buffer *)desc->ref[i];
+      if (!frame) break;
+      /* The frame index is relative to the last IDR frame. So once the frame
+       * num goes back to 0, previous reference frames need to have a negative
+       * index.
+       */
+      if (desc->frame_num >= frame->frame_num_max) {
+         frame->frame_num_max = desc->frame_num;
+      } else {
+         frame->frame_num -= frame->frame_num_max + 1;
+         frame->frame_num_max = desc->frame_num;
+      }
+      ref->non_existing = 0;
+      ref->field_is_ref = (desc->top_is_reference[i] ? 1 : 0) |
+         (desc->bottom_is_reference[i] ? 2 : 0);
+      ref->is_long_term = desc->is_long_term[i];
+      ref->field_order_cnt[0] = desc->field_order_cnt_list[i][0];
+      ref->field_order_cnt[1] = desc->field_order_cnt_list[i][1];
+      ref->frame_idx = frame->frame_num;
+      ref->u00 = ref->mvidx = frame->mvidx;
+      ref->field_pic_flag = desc->field_pic_flag;
+      indexes[frame->mvidx] = 1;
+   }
+
+   /* Needs to be adjusted if we ever support non-4:2:0 videos */
+   params.iseqparm.chroma_format_idc = 1;
+
+   params.iseqparm.pic_width_in_mbs_minus1 = mb(dec->base.width) - 1;
+   if (desc->field_pic_flag)
+      params.iseqparm.pic_height_in_map_units_minus1 = mb_half(dec->base.height) - 1;
+   else
+      params.iseqparm.pic_height_in_map_units_minus1 = mb(dec->base.height) - 1;
+
+   /* TODO: interlaced still doesn't work, maybe due to ref frame management. */
+   if (desc->bottom_field_flag)
+      params.ipicparm.curr_pic_order_cnt = desc->field_order_cnt[1];
+   else
+      params.ipicparm.curr_pic_order_cnt = desc->field_order_cnt[0];
+   params.ipicparm.field_order_cnt[0] = desc->field_order_cnt[0];
+   params.ipicparm.field_order_cnt[1] = desc->field_order_cnt[1];
+   /* Reference frames need a motion-vector index; assign the lowest one
+    * not already taken by a current reference. */
+   if (desc->is_reference) {
+      if (dest->mvidx < 0) {
+         for (i = 0; i < desc->num_ref_frames + 1; i++) {
+            if (!indexes[i]) {
+               dest->mvidx = i;
+               break;
+            }
+         }
+         assert(i != desc->num_ref_frames + 1);
+      }
+
+      params.ipicparm.u1cc = params.ipicparm.curr_mvidx = dest->mvidx;
+   }
+
+   /* Copy the remaining SPS/PPS fields straight from the state tracker. */
+   params.iseqparm.num_ref_frames = desc->num_ref_frames;
+   params.iseqparm.mb_adaptive_frame_field_flag = desc->mb_adaptive_frame_field_flag;
+   params.ipicparm.constrained_intra_pred_flag = desc->constrained_intra_pred_flag;
+   params.ipicparm.weighted_pred_flag = desc->weighted_pred_flag;
+   params.ipicparm.weighted_bipred_idc = desc->weighted_bipred_idc;
+   params.iseqparm.frame_mbs_only_flag = desc->frame_mbs_only_flag;
+   params.ipicparm.transform_8x8_mode_flag = desc->transform_8x8_mode_flag;
+   params.ipicparm.chroma_qp_index_offset = desc->chroma_qp_index_offset;
+   params.ipicparm.second_chroma_qp_index_offset = desc->second_chroma_qp_index_offset;
+   params.ipicparm.pic_init_qp_minus26 = desc->pic_init_qp_minus26;
+   params.ipicparm.num_ref_idx_l0_active_minus1 = desc->num_ref_idx_l0_active_minus1;
+   params.ipicparm.num_ref_idx_l1_active_minus1 = desc->num_ref_idx_l1_active_minus1;
+   params.iseqparm.log2_max_frame_num_minus4 = desc->log2_max_frame_num_minus4;
+   params.iseqparm.pic_order_cnt_type = desc->pic_order_cnt_type;
+   params.iseqparm.log2_max_pic_order_cnt_lsb_minus4 = desc->log2_max_pic_order_cnt_lsb_minus4;
+   params.iseqparm.delta_pic_order_always_zero_flag = desc->delta_pic_order_always_zero_flag;
+   params.iseqparm.direct_8x8_inference_flag = desc->direct_8x8_inference_flag;
+   params.ipicparm.entropy_coding_mode_flag = desc->entropy_coding_mode_flag;
+   params.ipicparm.pic_order_present_flag = desc->pic_order_present_flag;
+   params.ipicparm.deblocking_filter_control_present_flag = desc->deblocking_filter_control_present_flag;
+   params.ipicparm.redundant_pic_cnt_present_flag = desc->redundant_pic_cnt_present_flag;
+
+   /* Bitstream BO layout: params at 0, more_params at 0x600, slice data
+    * (followed by the end marker) from 0x700. */
+   memcpy(dec->bitstream->map, &params, sizeof(params));
+   for (i = 0; i < num_buffers; i++) {
+      assert(total_bytes + num_bytes[i] < dec->bitstream->size / 2 - 0x700);
+      memcpy(dec->bitstream->map + 0x700 + total_bytes, data[i], num_bytes[i]);
+      total_bytes += num_bytes[i];
+   }
+   memcpy(dec->bitstream->map + 0x700 + total_bytes, end, sizeof(end));
+   total_bytes += sizeof(end);
+   more_params[1] = total_bytes;
+   memcpy(dec->bitstream->map + 0x600, more_params, sizeof(more_params));
+
+   PUSH_SPACE(push, 5 + 21 + 3 + 2 + 4 + 2);
+   nouveau_pushbuf_refn(push, bo_refs, sizeof(bo_refs)/sizeof(bo_refs[0]));
+
+   /* Wait for the fence = 1 */
+   BEGIN_NV04(push, SUBC_BSP(0x10), 4);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, 1);
+
+   /* TODO: Use both halves of bitstream/vpring for alternating frames */
+
+   /* Kick off the BSP */
+   BEGIN_NV04(push, SUBC_BSP(0x400), 20);
+   PUSH_DATA (push, dec->bitstream->offset >> 8);
+   PUSH_DATA (push, (dec->bitstream->offset >> 8) + 7);
+   PUSH_DATA (push, dec->bitstream->size / 2 - 0x700);
+   PUSH_DATA (push, (dec->bitstream->offset >> 8) + 6);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, dec->mbring->offset >> 8);
+   PUSH_DATA (push, dec->frame_size);
+   PUSH_DATA (push, (dec->mbring->offset + dec->frame_size) >> 8);
+   PUSH_DATA (push, dec->vpring->offset >> 8);
+   PUSH_DATA (push, dec->vpring->size / 2);
+   PUSH_DATA (push, dec->vpring_residual);
+   PUSH_DATA (push, dec->vpring_ctrl);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, dec->vpring_residual);
+   PUSH_DATA (push, dec->vpring_residual + dec->vpring_ctrl);
+   PUSH_DATA (push, dec->vpring_deblock);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual + dec->vpring_deblock) >> 8);
+   PUSH_DATA (push, 0x654321);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0x100008);
+
+   BEGIN_NV04(push, SUBC_BSP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_BSP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Write fence = 2, intr */
+   BEGIN_NV04(push, SUBC_BSP(0x610), 3);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 2);
+
+   BEGIN_NV04(push, SUBC_BSP(0x304), 1);
+   PUSH_DATA (push, 0x101);
+   PUSH_KICK (push);
+   return 0;
+}
diff --git a/src/gallium/drivers/nv50/nv84_video_vp.c b/src/gallium/drivers/nv50/nv84_video_vp.c
new file mode 100644
index 0000000..60c0848
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video_vp.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <immintrin.h>
+
+#include "nv84_video.h"
+
+/* First VP firmware parameter block (step 1), written at offset 0 of
+ * vp_params; must be 0x218 bytes (see STATIC_ASSERT in
+ * nv84_decoder_vp_h264).  Hex comments note field offsets; unk-prefixed
+ * fields have unknown meaning. */
+struct h264_iparm1 {
+   uint8_t scaling_lists_4x4[6][16]; // 00
+   uint8_t scaling_lists_8x8[2][64]; // 60
+   uint32_t width; // e0
+   uint32_t height; // e4
+   uint64_t ref1_addrs[16]; // e8
+   uint64_t ref2_addrs[16]; // 168
+   uint32_t unk1e8;
+   uint32_t unk1ec;
+   uint32_t w1; // 1f0
+   uint32_t w2; // 1f4
+   uint32_t w3; // 1f8
+   uint32_t h1; // 1fc
+   uint32_t h2; // 200
+   uint32_t h3; // 204
+   uint32_t unk208;
+   uint32_t field_pic_flag;
+   uint32_t format;
+   uint32_t unk214;
+};
+
+/* Second VP firmware parameter block (step 2), written at offset 0x400 of
+ * vp_params; must be 0x38 bytes.  Hex comments note field offsets. */
+struct h264_iparm2 {
+   uint32_t width; // 00
+   uint32_t height; // 04
+   uint32_t mbs; // 08
+   uint32_t w1; // 0c
+   uint32_t w2; // 10
+   uint32_t w3; // 14
+   uint32_t h1; // 18
+   uint32_t h2; // 1c
+   uint32_t h3; // 20
+   uint32_t unk24;
+   uint32_t unk28;
+   uint32_t top; // 2c
+   uint32_t bottom; // 30
+   uint32_t is_reference; // 34
+};
+
+/* Run the VP engine over the BSP output to reconstruct one H.264 frame
+ * into dest.  Waits for the fence to reach 2 (set by nv84_decoder_bsp),
+ * runs the two VP firmware steps, then resets the fence to 1. */
+void
+nv84_decoder_vp_h264(struct nv84_decoder *dec,
+                     struct pipe_h264_picture_desc *desc,
+                     struct nv84_video_buffer *dest)
+{
+   struct h264_iparm1 param1;
+   struct h264_iparm2 param2;
+   int i, width = align(dest->base.width, 16),
+      height = align(dest->base.height, 16);
+
+   struct nouveau_pushbuf *push = dec->vp_pushbuf;
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dest->interlaced, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dest->full, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->vpring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mbring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->vp_params, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+      { dec->fence, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+   int num_refs = sizeof(bo_refs)/sizeof(*bo_refs);
+   bool is_ref = desc->is_reference;
+
+   /* The firmware expects these exact parameter block sizes. */
+   STATIC_ASSERT(sizeof(struct h264_iparm1) == 0x218);
+   STATIC_ASSERT(sizeof(struct h264_iparm2) == 0x38);
+
+   memset(&param1, 0, sizeof(param1));
+   memset(&param2, 0, sizeof(param2));
+
+   memcpy(&param1.scaling_lists_4x4, desc->scaling_lists_4x4,
+          sizeof(param1.scaling_lists_4x4));
+   memcpy(&param1.scaling_lists_8x8, desc->scaling_lists_8x8,
+          sizeof(param1.scaling_lists_8x8));
+
+   param1.width = width;
+   param1.w1 = param1.w2 = param1.w3 = align(width, 64);
+   param1.height = param1.h2 = height;
+   param1.h1 = param1.h3 = align(height, 32);
+   param1.format = 0x3231564e; /* 'NV12' */
+   param1.field_pic_flag = desc->field_pic_flag;
+
+   param2.width = width;
+   param2.w1 = param2.w2 = param2.w3 = param1.w1;
+   if (desc->field_pic_flag)
+      param2.height = align(height, 32) / 2;
+   else
+      param2.height = height;
+   param2.h1 = param2.h2 = align(height, 32);
+   param2.h3 = height;
+   /* Macroblock count: (width/16) * (height/16). */
+   param2.mbs = width * height >> 8;
+   if (desc->field_pic_flag) {
+      param2.top = desc->bottom_field_flag ? 2 : 1;
+      param2.bottom = desc->bottom_field_flag;
+   }
+   param2.is_reference = desc->is_reference;
+
+   PUSH_SPACE(push, 5 + 16 + 3 + 2 + 6 + (is_ref ? 2 : 0) + 3 + 2 + 4 + 2);
+
+   struct nouveau_bo *ref2_default = dest->full;
+
+   /* Fill the 16 reference surface address slots; empty slots point at the
+    * destination / first reference so the firmware always has valid
+    * addresses to read. */
+   for (i = 0; i < 16; i++) {
+      struct nv84_video_buffer *buf = (struct nv84_video_buffer *)desc->ref[i];
+      struct nouveau_bo *bo1, *bo2;
+      if (buf) {
+         bo1 = buf->interlaced;
+         bo2 = buf->full;
+         if (i == 0)
+            ref2_default = buf->full;
+      } else {
+         bo1 = dest->interlaced;
+         bo2 = ref2_default;
+      }
+      param1.ref1_addrs[i] = bo1->offset;
+      param1.ref2_addrs[i] = bo2->offset;
+      /* NOTE(review): this inner bo_refs intentionally shadows the
+       * function-level array; it only adds the per-reference BOs. */
+      struct nouveau_pushbuf_refn bo_refs[] = {
+         { bo1, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+         { bo2, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      };
+      nouveau_pushbuf_refn(push, bo_refs, sizeof(bo_refs)/sizeof(bo_refs[0]));
+   }
+
+   /* param1 at offset 0, param2 at 0x400 of the vp_params BO. */
+   memcpy(dec->vp_params->map, &param1, sizeof(param1));
+   memcpy(dec->vp_params->map + 0x400, &param2, sizeof(param2));
+
+   nouveau_pushbuf_refn(push, bo_refs, num_refs);
+
+   /* Wait for BSP to have completed */
+   BEGIN_NV04(push, SUBC_VP(0x10), 4);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 2);
+   PUSH_DATA (push, 1); /* wait for sem == 2 */
+
+   /* VP step 1 */
+   BEGIN_NV04(push, SUBC_VP(0x400), 15);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, param2.mbs);
+   PUSH_DATA (push, 0x3987654); /* each nibble probably a dma index */
+   PUSH_DATA (push, 0x55001); /* constant */
+   PUSH_DATA (push, dec->vp_params->offset >> 8);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_residual) >> 8);
+   PUSH_DATA (push, dec->vpring_ctrl);
+   PUSH_DATA (push, dec->vpring->offset >> 8);
+   PUSH_DATA (push, dec->bitstream->size / 2 - 0x700);
+   PUSH_DATA (push, (dec->mbring->offset + dec->mbring->size - 0x2000) >> 8);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual + dec->vpring_deblock) >> 8);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0x100008);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* VP step 2 */
+   BEGIN_NV04(push, SUBC_VP(0x400), 5);
+   PUSH_DATA (push, 0x54530201);
+   PUSH_DATA (push, (dec->vp_params->offset >> 8) + 0x4);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual) >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+
+   /* Reference frames also get a full-frame copy written to dest->full. */
+   if (is_ref) {
+      BEGIN_NV04(push, SUBC_VP(0x414), 1);
+      PUSH_DATA (push, dest->full->offset >> 8);
+   }
+
+   /* Step 2 uses the second-stage firmware blob. */
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATAh(push, dec->vp_fw2_offset);
+   PUSH_DATA (push, dec->vp_fw2_offset);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Set the semaphore back to 1 */
+   BEGIN_NV04(push, SUBC_VP(0x610), 3);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 1);
+
+   /* Write to the semaphore location, intr */
+   BEGIN_NV04(push, SUBC_VP(0x304), 1);
+   PUSH_DATA (push, 0x101);
+
+   /* Mark both planes as GPU-written so later CPU access synchronizes. */
+   for (i = 0; i < 2; i++) {
+      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
+      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   }
+
+   PUSH_KICK (push);
+}
+
+/* Inverse-quantize a single coefficient and clamp to [-2048, 2047].  For
+ * MPEG1, non-zero results are forced odd.  See the caveat below about
+ * non-intra macroblocks. */
+static INLINE int16_t inverse_quantize(int16_t val, uint8_t quant, int mpeg1) {
+   /* This is wrong for non-intra mbs... but mpeg12_bitstream doesn't pass us
+    * the right data to do it right. Need to + sign() before the quant
+    * scale. */
+   int16_t ret = val * quant / 16;
+   if (mpeg1 && ret) ret = (ret - 1) | 1;
+   if (ret < -2048) ret = -2048;
+   else if (ret > 2047) ret = 2047;
+   return ret;
+}
+
+/* Per-macroblock record handed to the VP engine for MPEG1/2 decoding;
+ * must be exactly 32 bytes (see STATIC_ASSERT in
+ * nv84_decoder_vp_mpeg12_mb).  unk4 packs intra/motion/dct-type bits and
+ * unk5 packs field-select and macroblock-mode bits, as assembled there. */
+struct mpeg12_mb_info {
+   uint32_t index;
+   uint8_t unk4;
+   uint8_t unk5;
+   uint16_t coded_block_pattern;
+   uint8_t block_counts[6];
+   uint16_t PMV[8];
+   uint16_t skipped;
+};
+
+void
+nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
+                          struct pipe_mpeg12_picture_desc *desc,
+                          const struct pipe_mpeg12_macroblock *macrob)
+{
+   STATIC_ASSERT(sizeof(struct mpeg12_mb_info) == 32);
+
+   struct mpeg12_mb_info info = {0};
+   int i, mask, block_index, count;
+   const int16_t *blocks;
+   int intra = macrob->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA;
+   int motion = macrob->macroblock_type &
+      (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD);
+   const uint8_t *quant_matrix = intra ? dec->mpeg12_intra_matrix :
+      dec->mpeg12_non_intra_matrix;
+   int mpeg1 = dec->base.profile == PIPE_VIDEO_PROFILE_MPEG1;
+
+   info.index = macrob->y * mb(dec->base.width) + macrob->x;
+   info.unk4 = motion;
+   if (intra)
+      info.unk4 |= 1;
+   if (macrob->macroblock_modes.bits.dct_type)
+      info.unk4 |= 0x20;
+   info.unk5 = (macrob->motion_vertical_field_select << 4) |
+      (macrob->macroblock_modes.value & 0xf);
+   info.coded_block_pattern = macrob->coded_block_pattern;
+   if (motion) {
+      memcpy(info.PMV, macrob->PMV, sizeof(info.PMV));
+   }
+   blocks = macrob->blocks;
+   for (mask = 0x20, block_index = 0; mask > 0; mask >>= 1, block_index++) {
+      if ((macrob->coded_block_pattern & mask) == 0)
+         continue;
+
+      count = 0;
+
+      /*
+       * The observation here is that there are a lot of 0's, and things go
+       * a lot faster if one skips over them.
+       */
+
+#ifdef __SSE4_2__
+#define wordmask(blocks, zero) \
+      _mm_cmpestrm(_mm_load_si128((__m128i *)(blocks)), 8, zero, 8, 0x19)
+
+      __m128i zero = _mm_setzero_si128();
+
+
+      __m128i tmask1, tmask2, tmask3, tmask4, tmask5, tmask6, tmask7, tmask8,
+         tmask9, tmask10, tmask11, tmask12;
+      /*
+       * Shuffle all the bits into the right places by using unpack. This is
+       * faster than doing shifts. Each word mask produces 8 bits, and there
+       * are 8 such masks. The end result that we want is 8 7 6 5 4 3 2 1 in a
+       * 64-bit int. This implementation is considerably faster than the
+       * generic one, on a Core i7-920.
+       */
+      tmask1 = wordmask(blocks, zero); // 1
+      tmask5 = wordmask(blocks + 32, zero); // 5
+      tmask9 = _mm_unpacklo_epi8(tmask1, tmask5); // 5 1
+      tmask3 = wordmask(blocks + 16, zero); // 3
+      tmask7 = wordmask(blocks + 48, zero); // 7
+      tmask10 = _mm_unpacklo_epi8(tmask3, tmask7); // 7 3
+      tmask11 = _mm_unpacklo_epi8(tmask9, tmask10); // 7 5 3 1
+      tmask2 = wordmask(blocks + 8, zero); // 2
+      tmask6 = wordmask(blocks + 40, zero); // 6
+      tmask9 = _mm_unpacklo_epi8(tmask2, tmask6); // 6 2
+      tmask4 = wordmask(blocks + 24, zero); // 4
+      tmask8 = wordmask(blocks + 56, zero); // 8
+      tmask10 = _mm_unpacklo_epi8(tmask4, tmask8); // 8 4
+      tmask12 = _mm_unpacklo_epi8(tmask9, tmask10); // 8 6 4 2
+      tmask9 = _mm_unpacklo_epi8(tmask11, tmask12); // 8 7 6 5 4 3 2 1
+      uint64_t bmask = _mm_extract_epi64(tmask9, 0);
+      i = 0;
+      int shift;
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         int sum = 0;
+         while ((shift = __builtin_ffsll(bmask))) {
+            int val;
+            // Note that shift might be 64, and >>= 64 is a noop.
+            i += shift - 1;
+            bmask >>= shift - 1;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            val = inverse_quantize(blocks[i], quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = val;
+            sum += val;
+            count++;
+            i++;
+            bmask >>= 1;
+         }
+         if (!mpeg1 && (sum & 1) == 0) {
+            if (count && *(dec->mpeg12_data - 2) == 63 * 2) {
+               uint16_t *val = dec->mpeg12_data - 1;
+               if (*val & 1) *val -= 1;
+               else *val += 1;
+            } else {
+               *dec->mpeg12_data++ = 63 * 2;
+               *dec->mpeg12_data++ = 1;
+               count++;
+            }
+         }
+      } else {
+         while ((shift = __builtin_ffsll(bmask))) {
+            // Note that shift might be 64, and >>= 64 is a noop.
+            i += shift - 1;
+            bmask >>= shift - 1;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = blocks[i];
+            count++;
+            i++;
+            bmask >>= 1;
+         }
+      }
+#undef wordmask
+#else
+
+      /*
+       * This loop looks ridiculously written... and it is. I tried a lot of
+       * different ways of achieving this scan, and this was the fastest, at
+       * least on a Core i7-920. Note that it's not necessary to skip the 0's,
+       * the firmware will deal with those just fine. But it's faster to skip
+       * them.
+       */
+      i = 0;
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         int sum = 0;
+         while (true) {
+            int16_t tmp;
+            // All the CPU time goes into this line.
+            while (likely(i < 64 && !(tmp = blocks[i]))) i++;
+            if (i >= 64) break;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(tmp, quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+         }
+         if (!mpeg1 && (sum & 1) == 0) {
+            if (count && *(dec->mpeg12_data - 2) == 63 * 2) {
+               uint16_t *val = dec->mpeg12_data - 1;
+               if (*val & 1) *val -= 1;
+               else *val += 1;
+            } else {
+               *dec->mpeg12_data++ = 63 * 2;
+               *dec->mpeg12_data++ = 1;
+               count++;
+            }
+         }
+      } else {
+         while (true) {
+            int16_t tmp;
+            // All the CPU time goes into this line.
+            while (likely(i < 64 && !(tmp = blocks[i]))) i++;
+            if (i >= 64) break;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = tmp;
+            count++;
+            i++;
+         }
+      }
+
+#endif
+
+      if (count) {
+         *(dec->mpeg12_data - 2) |= 1;
+      } else {
+         *dec->mpeg12_data++ = 1;
+         *dec->mpeg12_data++ = 0;
+         count = 1;
+      }
+      info.block_counts[block_index] = count;
+      blocks += 64;
+   }
+
+   memcpy(dec->mpeg12_mb_info, &info, sizeof(info));
+   dec->mpeg12_mb_info += sizeof(info);
+
+   if (macrob->num_skipped_macroblocks) {
+      info.index++;
+      info.coded_block_pattern = 0;
+      info.skipped = macrob->num_skipped_macroblocks - 1;
+      memset(info.block_counts, 0, sizeof(info.block_counts));
+      memcpy(dec->mpeg12_mb_info, &info, sizeof(info));
+      dec->mpeg12_mb_info += sizeof(info);
+   }
+}
+
+struct mpeg12_header {
+   uint32_t luma_top_size; // 00
+   uint32_t luma_bottom_size; // 04
+   uint32_t chroma_top_size; // 08
+   uint32_t mbs; // 0c
+   uint32_t mb_info_size; // 10
+   uint32_t mb_width_minus1; // 14
+   uint32_t mb_height_minus1; // 18
+   uint32_t width; // 1c
+   uint32_t height; // 20
+   uint8_t progressive; // 24
+   uint8_t mocomp_only; // 25
+   uint8_t frames; // 26
+   uint8_t picture_structure; // 27
+   uint32_t unk28; // 28 -- 0x50100
+   uint32_t unk2c; // 2c
+   uint32_t pad[4 * 13];
+};
+
+void
+nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
+                       struct pipe_mpeg12_picture_desc *desc,
+                       struct nv84_video_buffer *dest)
+{
+   struct nouveau_pushbuf *push = dec->vp_pushbuf;
+   struct nv84_video_buffer *ref1 = (struct nv84_video_buffer *)desc->ref[0];
+   struct nv84_video_buffer *ref2 = (struct nv84_video_buffer *)desc->ref[1];
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dest->interlaced, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mpeg12_bo, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+   };
+   int i, num_refs = sizeof(bo_refs)/sizeof(*bo_refs);
+   struct mpeg12_header header = {0};
+   struct nv50_miptree *y = nv50_miptree(dest->resources[0]);
+   struct nv50_miptree *uv = nv50_miptree(dest->resources[1]);
+
+   STATIC_ASSERT(sizeof(struct mpeg12_header) == 0x100);
+
+   if (ref1 == NULL) ref1 = dest;
+   if (ref2 == NULL) ref2 = dest;
+   bo_refs[1].bo = ref1->interlaced;
+   bo_refs[2].bo = ref2->interlaced;
+
+   header.luma_top_size = y->layer_stride;
+   header.luma_bottom_size = y->layer_stride;
+   header.chroma_top_size = uv->layer_stride;
+   header.mbs = mb(dec->base.width) * mb(dec->base.height);
+   header.mb_info_size = dec->mpeg12_mb_info - dec->mpeg12_bo->map - 0x100;
+   header.mb_width_minus1 = mb(dec->base.width) - 1;
+   header.mb_height_minus1 = mb(dec->base.height) - 1;
+   header.width = align(dec->base.width, 16);
+   header.height = align(dec->base.height, 16);
+   header.progressive = desc->frame_pred_frame_dct;
+   header.frames = 1 + (desc->ref[0] != NULL) + (desc->ref[1] != NULL);
+   header.picture_structure = desc->picture_structure;
+   header.unk28 = 0x50100;
+
+   memcpy(dec->mpeg12_bo->map, &header, sizeof(header));
+
+   PUSH_SPACE(push, 10 + 3 + 2);
+
+   nouveau_pushbuf_refn(push, bo_refs, num_refs);
+
+   BEGIN_NV04(push, SUBC_VP(0x400), 9);
+   PUSH_DATA (push, 0x543210); /* each nibble possibly a dma index */
+   PUSH_DATA (push, 0x555001); /* constant */
+   PUSH_DATA (push, dec->mpeg12_bo->offset >> 8);
+   PUSH_DATA (push, (dec->mpeg12_bo->offset + 0x100) >> 8);
+   PUSH_DATA (push, (dec->mpeg12_bo->offset + 0x100 +
+                     align(0x20 * mb(dec->base.width) *
+                           mb(dec->base.height), 0x100)) >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, ref1->interlaced->offset >> 8);
+   PUSH_DATA (push, ref2->interlaced->offset >> 8);
+   PUSH_DATA (push, 6 * 64 * 8 * header.mbs);
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   for (i = 0; i < 2; i++) {
+      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
+      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   }
+   PUSH_KICK (push);
+}
-- 
1.8.1.5

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [Nouveau] [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
  2013-06-27 11:26 [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0 Ilia Mirkin
@ 2013-06-29 18:07 ` Emil Velikov
  2013-06-29 20:21   ` Ilia Mirkin
       [not found] ` <1372332404-16547-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
  1 sibling, 1 reply; 7+ messages in thread
From: Emil Velikov @ 2013-06-29 18:07 UTC (permalink / raw)
  To: Ilia Mirkin; +Cc: nouveau, Maarten Lankhorst, emil.l.velikov, mesa-dev

Hi Ilia,

On 27/06/13 12:26, Ilia Mirkin wrote:
> Adds H.264 and MPEG2 codec support via VP2, using firmware from the
> blob. Acceleration is supported at the bitstream level for H.264 and
> IDCT level for MPEG2.
> 
> Known issues:
>  - H.264 interlaced doesn't render properly
>  - H.264 shows very occasional artifacts on a small fraction of videos
>  - MPEG2 + VDPAU shows frequent but small artifacts, which aren't there
>    when using XvMC on the same videos
> 
> Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>


Big thanks for working on this

I believe the hardware is capable of accelerating IDCT for VC1. Do you
have any plans for it ?

As far as I know mesa in general is keen on keeping trailing statements
on the next line, as well as 78(80) characters line length.

> ---
> 
> I did try to work out the known issues above, but so far to no avail.
> 
> The kernel support for these engines is not in mainline yet, but it's likely
> going to appear in 3.11. I figured that there would be a bunch of feedback so
> I might as well send it out early.
> 
> I played around a lot with XvMC performance, the mb function shows up as #1 in
> the profiles, with the SSE4.2 optimizations I put in, it drops to #2.
> Something clever can likely be done to improve VDPAU performance as well,
> e.g. using SSE to do the inverse quantization operations, but I've left that
Curious how many machines with a vp2 card have an sse4.2-capable CPU? Mine
is only sse4.1 ;(

> out. (It gets tricky because a lot of the data is 0's, so it's unclear whether
> it's faster to use SSE to do operations on everything or one-at-a-time on the
> non-0's like I have it now.) Even with these, XvMC ends up ~20% faster than
> plain CPU decoding, and likely that percent improves on older CPUs that can't
> decode MPEG2 quite as quickly. VDPAU provides further improvements (likely
> because it is able to skip mb's while XvMC can't), but there are artifacts for
> reasons unknown.
> 
> Note that in order to get XvMC to work, you need my previous patch (or
> something similar) since otherwise libXvMCnouveau can't be dlopen'd:
> http://lists.freedesktop.org/archives/mesa-dev/2013-June/040949.html
> 
> If you want to test it out, the kernel patch:
> http://lists.freedesktop.org/archives/nouveau/2013-June/012821.html
> 
> Firmware:
> https://github.com/imirkin/re-vp2/blob/master/extract_firmware.py
> 
>  src/gallium/drivers/nv50/Makefile.sources |   5 +-
>  src/gallium/drivers/nv50/nv50_context.c   |  13 +-
>  src/gallium/drivers/nv50/nv50_context.h   |  24 +
>  src/gallium/drivers/nv50/nv50_miptree.c   |  27 ++
>  src/gallium/drivers/nv50/nv50_resource.h  |   1 +
>  src/gallium/drivers/nv50/nv50_screen.c    |  13 +-
>  src/gallium/drivers/nv50/nv50_winsys.h    |   4 +
>  src/gallium/drivers/nv50/nv84_video.c     | 778 ++++++++++++++++++++++++++++++
>  src/gallium/drivers/nv50/nv84_video.h     | 134 +++++
>  src/gallium/drivers/nv50/nv84_video_bsp.c | 251 ++++++++++
>  src/gallium/drivers/nv50/nv84_video_vp.c  | 521 ++++++++++++++++++++
>  11 files changed, 1768 insertions(+), 3 deletions(-)
>  create mode 100644 src/gallium/drivers/nv50/nv84_video.c
>  create mode 100644 src/gallium/drivers/nv50/nv84_video.h
>  create mode 100644 src/gallium/drivers/nv50/nv84_video_bsp.c
>  create mode 100644 src/gallium/drivers/nv50/nv84_video_vp.c
> 
...
> diff --git a/src/gallium/drivers/nv50/nv84_video.c b/src/gallium/drivers/nv50/nv84_video.c
> new file mode 100644
> index 0000000..064178c
> --- /dev/null
> +++ b/src/gallium/drivers/nv50/nv84_video.c
> @@ -0,0 +1,778 @@
...
> +static int
> +nv84_copy_firmware(const char *path, void *dest, size_t len)
   ssize_t len
To prevent signed/unsigned issues in conditional below

> +{
> +   int fd = open(path, O_RDONLY | O_CLOEXEC);
> +   ssize_t r;
> +   if (fd < 0) {
> +      fprintf(stderr, "opening firmware file %s failed: %m\n", path);
> +      return 1;
> +   }
> +   r = read(fd, dest, len);
> +   close(fd);
> +
> +   if (r != len) {
Here        ^^

> +      fprintf(stderr, "reading firwmare file %s failed: %m\n", path);
> +      return 1;
> +   }
> +
> +   return 0;
> +}
> +
...
> +static void
> +nv84_decoder_decode_bitstream_mpeg12(struct pipe_video_decoder *decoder,
> +                                     struct pipe_video_buffer *video_target,
> +                                     struct pipe_picture_desc *picture,
> +                                     unsigned num_buffers,
> +                                     const void *const *data,
> +                                     const unsigned *num_bytes)
> +{
> +   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
> +   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
> +
> +   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
> +
> +   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
This can be written as
   assert(video_target->buffer_format == PIPE_FORMAT_NV12);

> +
> +   vl_mpg12_bs_decode(dec->mpeg12_bs,
> +                      video_target,
> +                      desc,
> +                      num_buffers,
> +                      data,
> +                      num_bytes);
And then the temporary variables can be removed, as you've done in
nv84_decoder_end_frame_mpeg12()

> +}
> +
...
> +
> +struct pipe_video_decoder *
> +nv84_create_decoder(struct pipe_context *context,
> +                    enum pipe_video_profile profile,
> +                    enum pipe_video_entrypoint entrypoint,
> +                    enum pipe_video_chroma_format chroma_format,
> +                    unsigned width, unsigned height,
> +                    unsigned max_references,
> +                    bool chunked_decode)
> +{
> +   struct nv50_context *nv50 = (struct nv50_context *)context;
> +   struct nouveau_screen *screen = &nv50->screen->base;
> +   struct nv84_decoder *dec;
> +   struct nouveau_pushbuf *bsp_push, *vp_push;
> +   struct nv50_surface surf;
> +   struct nv50_miptree mip;
> +   union pipe_color_union color;
> +   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
> +   int ret, i;
> +   int is_h264 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC;
> +   int is_mpeg12 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
> +   struct nouveau_pushbuf_refn fence_ref[] = {
> +      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
> +   };
> +
> +
> +   if (getenv("XVMC_VL"))
> +      return vl_create_decoder(context, profile, entrypoint,
> +                               chroma_format, width, height,
> +                               max_references, chunked_decode);
> +
> +   if ((is_h264 && entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
> +       (is_mpeg12 && entrypoint > PIPE_VIDEO_ENTRYPOINT_IDCT)) {
> +      debug_printf("%x\n", entrypoint);
> +      return NULL;
> +   }
> +
> +   if (!is_h264 && !is_mpeg12) {
> +      debug_printf("invalid profile: %x\n", profile);
> +      return NULL;
> +   }
> +
> +   dec = CALLOC_STRUCT(nv84_decoder);
> +   if (!dec) return NULL;
> +
> +   dec->base.context = context;
> +   dec->base.profile = profile;
> +   dec->base.entrypoint = entrypoint;
> +   dec->base.chroma_format = chroma_format;
> +   dec->base.width = width;
> +   dec->base.height = height;
> +   dec->base.max_references = max_references;
> +   dec->base.destroy = nv84_decoder_destroy;
> +   dec->base.flush = nv84_decoder_flush;
> +   if (is_h264) {
> +      dec->base.decode_bitstream = nv84_decoder_decode_bitstream_h264;
> +      dec->base.begin_frame = nv84_decoder_begin_frame_h264;
> +      dec->base.end_frame = nv84_decoder_end_frame_h264;
> +
> +      dec->frame_mbs = mb(dec->base.width) * mb_half(dec->base.height) * 2;
> +      dec->frame_size = dec->frame_mbs << 8;
> +      dec->vpring_deblock = align(0x30 * dec->frame_mbs, 0x100);
> +      dec->vpring_residual = 0x2000 + MAX2(0x32000, 0x600 * dec->frame_mbs);
> +      dec->vpring_ctrl = MAX2(0x10000, align(0x1080 + 0x144 * dec->frame_mbs, 0x100));
> +   } else if (is_mpeg12) {
> +      dec->base.decode_macroblock = nv84_decoder_decode_macroblock;
> +      dec->base.begin_frame = nv84_decoder_begin_frame_mpeg12;
> +      dec->base.end_frame = nv84_decoder_end_frame_mpeg12;
> +
> +      if (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
> +         dec->mpeg12_bs = CALLOC_STRUCT(vl_mpg12_bs);
> +         if (!dec->mpeg12_bs)
> +            goto fail;
> +         vl_mpg12_bs_init(dec->mpeg12_bs, &dec->base);
> +         dec->base.decode_bitstream = nv84_decoder_decode_bitstream_mpeg12;
> +      }
> +   } else {
> +      goto fail;
Seems to be handled already by - if (!is_h264 && !is_mpeg12)...

> +   }
> +
> +   ret = nouveau_client_new(screen->device, &dec->client);
> +   if (ret)
> +      goto fail;
Is there any particular reason for using a variable to store the return
value throughout this function? Methinks it can be safely purged, making
the code a bit cleaner

> +
> +   if (is_h264) {
> +      ret = nouveau_object_new(&screen->device->object, 0,
> +                               NOUVEAU_FIFO_CHANNEL_CLASS,
> +                               &nv04_data, sizeof(nv04_data), &dec->bsp_channel);
...

> +   if (is_h264) {
> +      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
> +       * of doing this... perhaps makes sense to just copy the relevant logic
> +       * here. */
> +      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
> +      surf.offset = dec->frame_size;
> +      surf.width = 64;
> +      surf.height = (max_references + 1) * dec->frame_mbs / 4;
> +      surf.depth = 1;
> +      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
> +      surf.base.u.tex.level = 0;
> +      surf.base.texture = &mip.base.base;
> +      mip.level[0].tile_mode = 0;
> +      mip.level[0].pitch = surf.width * 4;
> +      mip.base.domain = NOUVEAU_BO_VRAM;
> +      mip.base.bo = dec->mbring;
> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 64, 4760);
> +      surf.offset = dec->vpring->size / 2 - 0x1000;
> +      surf.width = 1024;
> +      surf.height = 1;
> +      mip.level[0].pitch = surf.width * 4;
> +      mip.base.bo = dec->vpring;
> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
> +      surf.offset = dec->vpring->size - 0x1000;
> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
> +
> +      PUSH_SPACE(screen->pushbuf, 5);
> +      fence_ref[0].bo = dec->fence;
> +      nouveau_pushbuf_refn(screen->pushbuf, fence_ref, 1);
> +      /* The clear_render_target is done via 3D engine, so use it to write to a
> +       * sempahore to indicate that it's done.
> +       */
> +      BEGIN_NV04(screen->pushbuf, SUBC_3D(0x1b00), 4);
> +      PUSH_DATAh(screen->pushbuf, dec->fence->offset);
> +      PUSH_DATA (screen->pushbuf, dec->fence->offset);
> +      PUSH_DATA (screen->pushbuf, 1);
> +      PUSH_DATA (screen->pushbuf, 0xf010);
> +      PUSH_KICK (screen->pushbuf);
> +
> +      PUSH_SPACE(bsp_push, 2 + 12 + 2 + 4 + 3);
> +
> +      BEGIN_NV04(bsp_push, SUBC_BSP(NV01_SUBCHAN_OBJECT), 1);
> +      PUSH_DATA (bsp_push, dec->bsp->handle);
> +
> +      BEGIN_NV04(bsp_push, SUBC_BSP(0x180), 11);
> +      for (i = 0; i < 11; i++)
Any idea where 11 comes from ? Is it related to some other parameter ?

> +         PUSH_DATA(bsp_push, nv04_data.vram);
> +      BEGIN_NV04(bsp_push, SUBC_BSP(0x1b8), 1);
> +      PUSH_DATA (bsp_push, nv04_data.vram);
> +
...

> +struct pipe_video_buffer *
> +nv84_video_buffer_create(struct pipe_context *pipe,
> +                         const struct pipe_video_buffer *template)
> +{
...
> +   buffer->base.buffer_format = template->buffer_format;
> +   buffer->base.context = pipe;
> +   buffer->base.destroy = nv84_video_buffer_destroy;
> +   buffer->base.chroma_format = template->chroma_format;
> +   buffer->base.width = template->width;
> +   buffer->base.height = template->height;
> +   buffer->base.get_sampler_view_planes = nv84_video_buffer_sampler_view_planes;
> +   buffer->base.get_sampler_view_components = nv84_video_buffer_sampler_view_components;
> +   buffer->base.get_surfaces = nv84_video_buffer_surfaces;
> +   buffer->base.interlaced = true;
By storing the number of planes, we will be able to demagic some constants
later on
   buffer->num_planes = 2;

> +
> +   memset(&templ, 0, sizeof(templ));
> +   templ.target = PIPE_TEXTURE_2D_ARRAY;
> +   templ.depth0 = 1;
> +   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
> +   templ.format = PIPE_FORMAT_R8_UNORM;
> +   templ.width0 = align(template->width, 2);
> +   templ.height0 = align(template->height, 4) / 2;
> +   templ.flags = NV50_RESOURCE_FLAG_VIDEO;
> +   templ.array_size = 2;
> +
> +   cfg.nv50.tile_mode = 0x20;
> +   cfg.nv50.memtype = 0x70;
> +
> +   buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
> +   if (!buffer->resources[0])
> +      goto error;
> +
> +   templ.format = PIPE_FORMAT_R8G8_UNORM;
> +   templ.width0 /= 2;
> +   templ.height0 /= 2;
> +   buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
> +   if (!buffer->resources[1])
> +      goto error;
I believe that the nvc0 version of the code is easier to read (bikeshed)

for (i = 1; i < buffer->num_planes; ++i) {
   buffer->resources[i] = pipe->screen->resource_create(pipe->screen,
&templ);
   if (!buffer->resources[i])
      goto error;
}

> +
> +   mt0 = nv50_miptree(buffer->resources[0]);
> +   mt1 = nv50_miptree(buffer->resources[1]);
> +
> +   bo_size = mt0->total_size + mt1->total_size;
> +   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
> +                      bo_size, &cfg, &buffer->interlaced))
> +      goto error;
> +   /* XXX Change reference frame management so that this is only allocated in
> +    * the decoder when necessary. */
> +   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
> +                      bo_size, &cfg, &buffer->full))
> +      goto error;
> +
> +   mt0->base.bo = buffer->interlaced;
> +   mt0->base.domain = NOUVEAU_BO_VRAM;
> +   mt0->base.offset = 0;
> +   mt0->base.address = buffer->interlaced->offset;
IMHO this looks a bit easier to grasp
   mt0->base.address = buffer->interlaced->offset + mt0->base.offset;

> +   nouveau_bo_ref(buffer->interlaced, &empty);
> +
> +   mt1->base.bo = buffer->interlaced;
> +   mt1->base.domain = NOUVEAU_BO_VRAM;
> +   mt1->base.offset = mt0->layer_stride * 2;
> +   mt1->base.address = buffer->interlaced->offset + mt0->layer_stride * 2;
Similar
   mt1->base.address = buffer->interlaced->offset + mt1->base.offset;

> +   nouveau_bo_ref(buffer->interlaced, &empty);
> +
> +   memset(&sv_templ, 0, sizeof(sv_templ));
> +   for (component = 0, i = 0; i < 2; ++i ) {
   for (component = 0, i = 0; i < buffer->num_planes; ++i ) {

> +      struct pipe_resource *res = buffer->resources[i];
> +      unsigned nr_components = util_format_get_nr_components(res->format);
> +
> +      u_sampler_view_default_template(&sv_templ, res, res->format);
> +      buffer->sampler_view_planes[i] = pipe->create_sampler_view(pipe, res, &sv_templ);
> +      if (!buffer->sampler_view_planes[i])
> +         goto error;
> +
> +      for (j = 0; j < nr_components; ++j, ++component) {
> +         sv_templ.swizzle_r = sv_templ.swizzle_g = sv_templ.swizzle_b = PIPE_SWIZZLE_RED + j;
> +         sv_templ.swizzle_a = PIPE_SWIZZLE_ONE;
> +
> +         buffer->sampler_view_components[component] = pipe->create_sampler_view(pipe, res, &sv_templ);
> +         if (!buffer->sampler_view_components[component])
> +            goto error;
> +      }
> +   }
> +
> +   memset(&surf_templ, 0, sizeof(surf_templ));
> +   for (j = 0; j < 2; ++j) {
   for (j = 0; j < buffer->num_planes; ++j) {

> +      surf_templ.format = buffer->resources[j]->format;
> +      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 0;
> +      buffer->surfaces[j * 2] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
> +      if (!buffer->surfaces[j * 2])
> +         goto error;
> +
> +      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 1;
> +      buffer->surfaces[j * 2 + 1] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
> +      if (!buffer->surfaces[j * 2 + 1])
> +         goto error;
> +   }
> +
> +   return &buffer->base;
> +
> +error:
> +   nv84_video_buffer_destroy(&buffer->base);
> +   return NULL;
> +}
> +
> +int
> +nv84_screen_get_video_param(struct pipe_screen *pscreen,
> +                            enum pipe_video_profile profile,
> +                            enum pipe_video_cap param)
> +{
> +   switch (param) {
> +   case PIPE_VIDEO_CAP_SUPPORTED:
> +      return u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC ||
> +         u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
      switch (u_reduce_video_profile(profile)) {
      case PIPE_VIDEO_CODEC_MPEG12:
      case PIPE_VIDEO_CODEC_MPEG4_AVC:
         return true;
      case PIPE_VIDEO_CODEC_VC1:
         /* TODO: Hardware is capable of IDCT acceleration for VC1*/
      case PIPE_VIDEO_CODEC_MPEG4:
      default:
         return false;
      }

> +   case PIPE_VIDEO_CAP_NPOT_TEXTURES:
> +      return 1;
> +   case PIPE_VIDEO_CAP_MAX_WIDTH:
> +   case PIPE_VIDEO_CAP_MAX_HEIGHT:
> +      return 2048;
> +   case PIPE_VIDEO_CAP_PREFERED_FORMAT:
> +      return PIPE_FORMAT_NV12;
> +   case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
> +   case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
> +      return true;
> +   case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
> +      return false;
> +   default:
> +      debug_printf("unknown video param: %d\n", param);
> +      return 0;
> +   }
> +}
> +
> +boolean
> +nv84_screen_video_supported(struct pipe_screen *screen,
> +                            enum pipe_format format,
> +                            enum pipe_video_profile profile)
> +{
> +   return format == PIPE_FORMAT_NV12;
Will this work when we have XVMC_VL set ?

> +}
> diff --git a/src/gallium/drivers/nv50/nv84_video.h b/src/gallium/drivers/nv50/nv84_video.h
> new file mode 100644
> index 0000000..4ff8cf3
> --- /dev/null
> +++ b/src/gallium/drivers/nv50/nv84_video.h
...
> +struct nv84_video_buffer {
> +   struct pipe_video_buffer base;
   unsigned num_planes;

> +   struct pipe_resource *resources[VL_NUM_COMPONENTS];
> +   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
> +   struct pipe_sampler_view *sampler_view_components[VL_NUM_COMPONENTS];
> +   struct pipe_surface *surfaces[VL_NUM_COMPONENTS * 2];
> +
> +   struct nouveau_bo *interlaced, *full;
> +   int mvidx;
> +   unsigned frame_num, frame_num_max;
> +};
> +

Looking at the params associated with each video engine, I was wondering
about compacting them into a struct (the names chosen are the first thing
that came to mind)

struct nv84_decoder_eng {
   struct nouveau_object *obj;
   struct nouveau_object *channel;
   struct nouveau_pushbuf *pushbuf;
   struct nouveau_bufctx *bufctx;

   struct nouveau_bo *fw;
   struct nouveau_bo *data;
}

and then having an enum for the different engine types

enum nv84_decoder_eng_type
{
   BSP = 0,
   VP
};

#define NV84_DECODER_ENG_NUM VP + 1

> +struct nv84_decoder {
> +   struct pipe_video_decoder base;
> +   struct nouveau_client *client;
> +   struct nouveau_object *bsp_channel, *vp_channel, *bsp, *vp;
> +   struct nouveau_pushbuf *bsp_pushbuf, *vp_pushbuf;
> +   struct nouveau_bufctx *bsp_bufctx, *vp_bufctx;
Then the struct will look a bit cleaner
struct nv84_decoder {
   struct pipe_video_decoder base;
   struct nouveau_client *client;
   struct nv84_decoder_eng engs[NV84_DECODER_ENG_NUM];


> +
> +   struct nouveau_bo *bsp_fw, *bsp_data;
> +   struct nouveau_bo *vp_fw, *vp_data;
> +   struct nouveau_bo *mbring, *vpring;
> +
> +   /*
> +    * states:
> +    *  0: init
> +    *  1: vpring/mbring cleared, bsp is ready
> +    *  2: bsp is done, vp is ready
> +    * and then vp it back to 1
> +    */
> +   struct nouveau_bo *fence;
> +
> +   struct nouveau_bo *bitstream;
> +   struct nouveau_bo *vp_params;
> +
> +   size_t vp_fw2_offset;
> +
> +   unsigned frame_mbs, frame_size;
> +   /* VPRING layout:
> +        RESIDUAL
> +        CTRL
> +        DEBLOCK
> +        0x1000
> +   */
> +   unsigned vpring_deblock, vpring_residual, vpring_ctrl;
> +
> +
> +   struct vl_mpg12_bs *mpeg12_bs;
> +
> +   struct nouveau_bo *mpeg12_bo;
> +   void *mpeg12_mb_info;
> +   uint16_t *mpeg12_data;
> +   const int *zscan;
> +   uint8_t mpeg12_intra_matrix[64];
> +   uint8_t mpeg12_non_intra_matrix[64];
> +};
> +
...

> +static INLINE uint32_t mb(uint32_t coord)
> +{
> +   return (coord + 0xf)>>4;
> +}
> +
> +static INLINE uint32_t mb_half(uint32_t coord)
> +{
> +   return (coord + 0x1f)>>5;
> +}
How about moving these into nouveau_video.h? (and removing the duplicate
copy from nvc0_video.h)

Might be better as a follow-on patch

...
> diff --git a/src/gallium/drivers/nv50/nv84_video_vp.c b/src/gallium/drivers/nv50/nv84_video_vp.c
> new file mode 100644
> index 0000000..60c0848
> --- /dev/null
> +++ b/src/gallium/drivers/nv50/nv84_video_vp.c
> @@ -0,0 +1,521 @@
> +/*
> + * Copyright 2013 Ilia Mirkin
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <immintrin.h>
Wrap this include in
#ifdef __SSE4_2__
#endif

considering its definitions are used in a similar block
...

> +void
> +nv84_decoder_vp_h264(struct nv84_decoder *dec,
> +                     struct pipe_h264_picture_desc *desc,
> +                     struct nv84_video_buffer *dest)
> +{
...

> +   for (i = 0; i < 2; i++) {
   for (i = 0; i < dest->num_planes; i++) {

> +      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
> +      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
> +   }
> +
> +   PUSH_KICK (push);
> +}
> +
...

> +void
> +nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
> +                          struct pipe_mpeg12_picture_desc *desc,
> +                          const struct pipe_mpeg12_macroblock *macrob)
> +{
...

> +#ifdef __SSE4_2__
IMHO this may produce non-portable binaries in case of aggressive
mtune/march flags. I'm not objecting to it, just pointing it out

> +void
> +nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
> +                       struct pipe_mpeg12_picture_desc *desc,
> +                       struct nv84_video_buffer *dest)
> +{
...

> +   for (i = 0; i < 2; i++) {
   for (i = 0; i < dest->num_planes; i++) {


Cheers
Emil

> +      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
> +      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
> +   }
> +   PUSH_KICK (push);
> +}
> 

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Nouveau] [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
  2013-06-29 18:07 ` [Nouveau] " Emil Velikov
@ 2013-06-29 20:21   ` Ilia Mirkin
  2013-06-30  0:33     ` Emil Velikov
  0 siblings, 1 reply; 7+ messages in thread
From: Ilia Mirkin @ 2013-06-29 20:21 UTC (permalink / raw)
  To: Emil Velikov; +Cc: mesa-dev, Maarten Lankhorst, nouveau

On Sat, Jun 29, 2013 at 2:07 PM, Emil Velikov <emil.l.velikov@gmail.com> wrote:
> Hi Ilia,
>
> On 27/06/13 12:26, Ilia Mirkin wrote:
>> Adds H.264 and MPEG2 codec support via VP2, using firmware from the
>> blob. Acceleration is supported at the bitstream level for H.264 and
>> IDCT level for MPEG2.
>>
>> Known issues:
>>  - H.264 interlaced doesn't render properly
>>  - H.264 shows very occasional artifacts on a small fraction of videos
>>  - MPEG2 + VDPAU shows frequent but small artifacts, which aren't there
>>    when using XvMC on the same videos
>>
>> Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
>
>
> Big thanks for working on this
>
> I believe the hardware is capable of accelerating IDCT for VC1. Do you
> have any plans for it ?

Nope. With MPEG, there's XvMC, which sends the macroblocks directly.
And there's an existing MPEG parser in gallium which can be used for
VDPAU. With VC-1, no such thing as XvMC. Which means that I'd have to
implement a bitstream parser in gallium. The spec is closed, and I'm
not at all familiar with the concepts used in VC-1.

>
> As far as I know mesa in general is keen on keeping trailing statements
> on the next line, as well as 78(80) characters line length.

I tried to do that... in the places where I violate that I felt that
it enhanced readability. Feel free to point out places where you
disagree. Also if it's a hard policy (e.g. as in the linux kernel), I
can go through and reformat.

>
>> ---
>>
>> I did try to work out the known issues above, but so far to no avail.
>>
>> The kernel support for these engines is not in mainline yet, but it's likely
>> going to appear in 3.11. I figured that there would be a bunch of feedback so
>> I might as well send it out early.
>>
>> I played around a lot with XvMC performance, the mb function shows up as #1 in
>> the profiles, with the SSE4.2 optimizations I put in, it drops to #2.
>> Something clever can likely be done to improve VDPAU performance as well,
>> e.g. using SSE to do the inverse quantization operations, but I've left that
> Curious how many machines with a vp2 card have an sse4.2-capable CPU? Mine
> is only sse4.1 ;(

At least one (mine). SSE4.2 appeared on Core i7 (Nehalem), AFAIK.
Other optimizations may be possible with just SSE2, but I couldn't
think of anything particularly clever. Those PCMPESTRM instructions
are _really_ useful to skip over the useless 0's. I guess something
could be done with PCMPEQW and PMOVMSKB (wish there were a
PMOVMSKW...). I'll play with it. But the plain-C loop isn't so bad
either.

>
>> out. (It gets tricky because a lot of the data is 0's, so it's unclear whether
>> it's faster to use SSE to do operations on everything or one-at-a-time on the
>> non-0's like I have it now.) Even with these, XvMC ends up ~20% faster than
>> plain CPU decoding, and likely that percent improves on older CPUs that can't
>> decode MPEG2 quite as quickly. VDPAU provides further improvements (likely
>> because it is able to skip mb's while XvMC can't), but there are artifacts for
>> reasons unknown.
>>
>> Note that in order to get XvMC to work, you need my previous patch (or
>> something similar) since otherwise libXvMCnouveau can't be dlopen'd:
>> http://lists.freedesktop.org/archives/mesa-dev/2013-June/040949.html
>>
>> If you want to test it out, the kernel patch:
>> http://lists.freedesktop.org/archives/nouveau/2013-June/012821.html
>>
>> Firmware:
>> https://github.com/imirkin/re-vp2/blob/master/extract_firmware.py
>>
>>  src/gallium/drivers/nv50/Makefile.sources |   5 +-
>>  src/gallium/drivers/nv50/nv50_context.c   |  13 +-
>>  src/gallium/drivers/nv50/nv50_context.h   |  24 +
>>  src/gallium/drivers/nv50/nv50_miptree.c   |  27 ++
>>  src/gallium/drivers/nv50/nv50_resource.h  |   1 +
>>  src/gallium/drivers/nv50/nv50_screen.c    |  13 +-
>>  src/gallium/drivers/nv50/nv50_winsys.h    |   4 +
>>  src/gallium/drivers/nv50/nv84_video.c     | 778 ++++++++++++++++++++++++++++++
>>  src/gallium/drivers/nv50/nv84_video.h     | 134 +++++
>>  src/gallium/drivers/nv50/nv84_video_bsp.c | 251 ++++++++++
>>  src/gallium/drivers/nv50/nv84_video_vp.c  | 521 ++++++++++++++++++++
>>  11 files changed, 1768 insertions(+), 3 deletions(-)
>>  create mode 100644 src/gallium/drivers/nv50/nv84_video.c
>>  create mode 100644 src/gallium/drivers/nv50/nv84_video.h
>>  create mode 100644 src/gallium/drivers/nv50/nv84_video_bsp.c
>>  create mode 100644 src/gallium/drivers/nv50/nv84_video_vp.c
>>
> ...
>> diff --git a/src/gallium/drivers/nv50/nv84_video.c b/src/gallium/drivers/nv50/nv84_video.c
>> new file mode 100644
>> index 0000000..064178c
>> --- /dev/null
>> +++ b/src/gallium/drivers/nv50/nv84_video.c
>> @@ -0,0 +1,778 @@
> ...
>> +static int
>> +nv84_copy_firmware(const char *path, void *dest, size_t len)
>    ssize_t len
> To prevent signed/unsigned issues in conditional below
>
>> +{
>> +   int fd = open(path, O_RDONLY | O_CLOEXEC);
>> +   ssize_t r;
>> +   if (fd < 0) {
>> +      fprintf(stderr, "opening firmware file %s failed: %m\n", path);
>> +      return 1;
>> +   }
>> +   r = read(fd, dest, len);
>> +   close(fd);
>> +
>> +   if (r != len) {
> Here        ^^

Yes, in case that len is 2^64 - 1... but I'll change it :)

>
>> +      fprintf(stderr, "reading firmware file %s failed: %m\n", path);
>> +      return 1;
>> +   }
>> +
>> +   return 0;
>> +}
>> +
> ...
>> +static void
>> +nv84_decoder_decode_bitstream_mpeg12(struct pipe_video_decoder *decoder,
>> +                                     struct pipe_video_buffer *video_target,
>> +                                     struct pipe_picture_desc *picture,
>> +                                     unsigned num_buffers,
>> +                                     const void *const *data,
>> +                                     const unsigned *num_bytes)
>> +{
>> +   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
>> +   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
>> +
>> +   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
>> +
>> +   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
> This can be written as
>    assert(video_target->buffer_format == PIPE_FORMAT_NV12);

Good point. The code was a bit different at an earlier point.

>
>> +
>> +   vl_mpg12_bs_decode(dec->mpeg12_bs,
>> +                      video_target,
>> +                      desc,
>> +                      num_buffers,
>> +                      data,
>> +                      num_bytes);
> And then the temporary variables can be removed, as you've done in
> nv84_decoder_end_frame_mpeg12()
>
>> +}
>> +
> ...
>> +
>> +struct pipe_video_decoder *
>> +nv84_create_decoder(struct pipe_context *context,
>> +                    enum pipe_video_profile profile,
>> +                    enum pipe_video_entrypoint entrypoint,
>> +                    enum pipe_video_chroma_format chroma_format,
>> +                    unsigned width, unsigned height,
>> +                    unsigned max_references,
>> +                    bool chunked_decode)
>> +{
>> +   struct nv50_context *nv50 = (struct nv50_context *)context;
>> +   struct nouveau_screen *screen = &nv50->screen->base;
>> +   struct nv84_decoder *dec;
>> +   struct nouveau_pushbuf *bsp_push, *vp_push;
>> +   struct nv50_surface surf;
>> +   struct nv50_miptree mip;
>> +   union pipe_color_union color;
>> +   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
>> +   int ret, i;
>> +   int is_h264 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC;
>> +   int is_mpeg12 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
>> +   struct nouveau_pushbuf_refn fence_ref[] = {
>> +      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
>> +   };
>> +
>> +
>> +   if (getenv("XVMC_VL"))
>> +      return vl_create_decoder(context, profile, entrypoint,
>> +                               chroma_format, width, height,
>> +                               max_references, chunked_decode);
>> +
>> +   if ((is_h264 && entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
>> +       (is_mpeg12 && entrypoint > PIPE_VIDEO_ENTRYPOINT_IDCT)) {
>> +      debug_printf("%x\n", entrypoint);
>> +      return NULL;
>> +   }
>> +
>> +   if (!is_h264 && !is_mpeg12) {
>> +      debug_printf("invalid profile: %x\n", profile);
>> +      return NULL;
>> +   }
>> +
>> +   dec = CALLOC_STRUCT(nv84_decoder);
>> +   if (!dec) return NULL;
>> +
>> +   dec->base.context = context;
>> +   dec->base.profile = profile;
>> +   dec->base.entrypoint = entrypoint;
>> +   dec->base.chroma_format = chroma_format;
>> +   dec->base.width = width;
>> +   dec->base.height = height;
>> +   dec->base.max_references = max_references;
>> +   dec->base.destroy = nv84_decoder_destroy;
>> +   dec->base.flush = nv84_decoder_flush;
>> +   if (is_h264) {
>> +      dec->base.decode_bitstream = nv84_decoder_decode_bitstream_h264;
>> +      dec->base.begin_frame = nv84_decoder_begin_frame_h264;
>> +      dec->base.end_frame = nv84_decoder_end_frame_h264;
>> +
>> +      dec->frame_mbs = mb(dec->base.width) * mb_half(dec->base.height) * 2;
>> +      dec->frame_size = dec->frame_mbs << 8;
>> +      dec->vpring_deblock = align(0x30 * dec->frame_mbs, 0x100);
>> +      dec->vpring_residual = 0x2000 + MAX2(0x32000, 0x600 * dec->frame_mbs);
>> +      dec->vpring_ctrl = MAX2(0x10000, align(0x1080 + 0x144 * dec->frame_mbs, 0x100));
>> +   } else if (is_mpeg12) {
>> +      dec->base.decode_macroblock = nv84_decoder_decode_macroblock;
>> +      dec->base.begin_frame = nv84_decoder_begin_frame_mpeg12;
>> +      dec->base.end_frame = nv84_decoder_end_frame_mpeg12;
>> +
>> +      if (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
>> +         dec->mpeg12_bs = CALLOC_STRUCT(vl_mpg12_bs);
>> +         if (!dec->mpeg12_bs)
>> +            goto fail;
>> +         vl_mpg12_bs_init(dec->mpeg12_bs, &dec->base);
>> +         dec->base.decode_bitstream = nv84_decoder_decode_bitstream_mpeg12;
>> +      }
>> +   } else {
>> +      goto fail;
> Seems to be handled already by - if (!is_h264 && !is_mpeg12)...

Yes. But I disliked relying on that, so I decided to leave it in.

>
>> +   }
>> +
>> +   ret = nouveau_client_new(screen->device, &dec->client);
>> +   if (ret)
>> +      goto fail;
> Is there any particular reason for using a variable to store the return
> value through this functions? Me thinks it can be safely purged, making
> the code a bit cleaner

It was like that in nvc0_video and a lot of other code. I think the
code looks a little odd when you have

if (super-long multi-line statement)
  goto fail

>
>> +
>> +   if (is_h264) {
>> +      ret = nouveau_object_new(&screen->device->object, 0,
>> +                               NOUVEAU_FIFO_CHANNEL_CLASS,
>> +                               &nv04_data, sizeof(nv04_data), &dec->bsp_channel);
> ...
>
>> +   if (is_h264) {
>> +      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
>> +       * of doing this... perhaps makes sense to just copy the relevant logic
>> +       * here. */
>> +      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
>> +      surf.offset = dec->frame_size;
>> +      surf.width = 64;
>> +      surf.height = (max_references + 1) * dec->frame_mbs / 4;
>> +      surf.depth = 1;
>> +      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
>> +      surf.base.u.tex.level = 0;
>> +      surf.base.texture = &mip.base.base;
>> +      mip.level[0].tile_mode = 0;
>> +      mip.level[0].pitch = surf.width * 4;
>> +      mip.base.domain = NOUVEAU_BO_VRAM;
>> +      mip.base.bo = dec->mbring;
>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 64, 4760);
>> +      surf.offset = dec->vpring->size / 2 - 0x1000;
>> +      surf.width = 1024;
>> +      surf.height = 1;
>> +      mip.level[0].pitch = surf.width * 4;
>> +      mip.base.bo = dec->vpring;
>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
>> +      surf.offset = dec->vpring->size - 0x1000;
>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
>> +
>> +      PUSH_SPACE(screen->pushbuf, 5);
>> +      fence_ref[0].bo = dec->fence;
>> +      nouveau_pushbuf_refn(screen->pushbuf, fence_ref, 1);
>> +      /* The clear_render_target is done via 3D engine, so use it to write to a
>> +       * semaphore to indicate that it's done.
>> +       */
>> +      BEGIN_NV04(screen->pushbuf, SUBC_3D(0x1b00), 4);
>> +      PUSH_DATAh(screen->pushbuf, dec->fence->offset);
>> +      PUSH_DATA (screen->pushbuf, dec->fence->offset);
>> +      PUSH_DATA (screen->pushbuf, 1);
>> +      PUSH_DATA (screen->pushbuf, 0xf010);
>> +      PUSH_KICK (screen->pushbuf);
>> +
>> +      PUSH_SPACE(bsp_push, 2 + 12 + 2 + 4 + 3);
>> +
>> +      BEGIN_NV04(bsp_push, SUBC_BSP(NV01_SUBCHAN_OBJECT), 1);
>> +      PUSH_DATA (bsp_push, dec->bsp->handle);
>> +
>> +      BEGIN_NV04(bsp_push, SUBC_BSP(0x180), 11);
>> +      for (i = 0; i < 11; i++)
> Any idea where 11 comes from ? Is it related to some other parameter ?

These are the dma indices (e.g. when i use 0x654321, each nibble is a
dmaidx, which is an offset into these 0x180 values). It's the part of
the whole thing that I understand the least... in theory I should be
able to just initialize one of them and use it throughout, but I
haven't been brave enough to put that theory to the test. This is how
the blob does it.

>
>> +         PUSH_DATA(bsp_push, nv04_data.vram);
>> +      BEGIN_NV04(bsp_push, SUBC_BSP(0x1b8), 1);
>> +      PUSH_DATA (bsp_push, nv04_data.vram);
>> +
> ...
>
>> +struct pipe_video_buffer *
>> +nv84_video_buffer_create(struct pipe_context *pipe,
>> +                         const struct pipe_video_buffer *template)
>> +{
> ...
>> +   buffer->base.buffer_format = template->buffer_format;
>> +   buffer->base.context = pipe;
>> +   buffer->base.destroy = nv84_video_buffer_destroy;
>> +   buffer->base.chroma_format = template->chroma_format;
>> +   buffer->base.width = template->width;
>> +   buffer->base.height = template->height;
>> +   buffer->base.get_sampler_view_planes = nv84_video_buffer_sampler_view_planes;
>> +   buffer->base.get_sampler_view_components = nv84_video_buffer_sampler_view_components;
>> +   buffer->base.get_surfaces = nv84_video_buffer_surfaces;
>> +   buffer->base.interlaced = true;
> By storing the number of planes, will be able to demagic some constants
> later on
>    buffer->num_planes = 2;

nvc0_video does that. I find that incredibly confusing. It's always 2,
and they are of diff size. I prefer my bike-shed color :)

>
>> +
>> +   memset(&templ, 0, sizeof(templ));
>> +   templ.target = PIPE_TEXTURE_2D_ARRAY;
>> +   templ.depth0 = 1;
>> +   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
>> +   templ.format = PIPE_FORMAT_R8_UNORM;
>> +   templ.width0 = align(template->width, 2);
>> +   templ.height0 = align(template->height, 4) / 2;
>> +   templ.flags = NV50_RESOURCE_FLAG_VIDEO;
>> +   templ.array_size = 2;
>> +
>> +   cfg.nv50.tile_mode = 0x20;
>> +   cfg.nv50.memtype = 0x70;
>> +
>> +   buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
>> +   if (!buffer->resources[0])
>> +      goto error;
>> +
>> +   templ.format = PIPE_FORMAT_R8G8_UNORM;
>> +   templ.width0 /= 2;
>> +   templ.height0 /= 2;
>> +   buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
>> +   if (!buffer->resources[1])
>> +      goto error;
> I believe that the nvc0 version of the code is easier to read (bikeshed)
>
> for (i = 1; i < buffer->num_planes; ++i) {

num_planes is always 2. for (i = 1; i < 2; i++) seems like a waste...

>    buffer->resources[i] = pipe->screen->resource_create(pipe->screen,
> &templ);
>    if (!buffer->resources[i])
>       goto error;
> }
>
>> +
>> +   mt0 = nv50_miptree(buffer->resources[0]);
>> +   mt1 = nv50_miptree(buffer->resources[1]);
>> +
>> +   bo_size = mt0->total_size + mt1->total_size;
>> +   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
>> +                      bo_size, &cfg, &buffer->interlaced))
>> +      goto error;
>> +   /* XXX Change reference frame management so that this is only allocated in
>> +    * the decoder when necessary. */
>> +   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
>> +                      bo_size, &cfg, &buffer->full))
>> +      goto error;
>> +
>> +   mt0->base.bo = buffer->interlaced;
>> +   mt0->base.domain = NOUVEAU_BO_VRAM;
>> +   mt0->base.offset = 0;
>> +   mt0->base.address = buffer->interlaced->offset;
> IMHO this looks a bit easier to grasp
>    mt0->base.address = buffer->interlaced->offset + mt0->base.offset;

Agreed.

>
>> +   nouveau_bo_ref(buffer->interlaced, &empty);
>> +
>> +   mt1->base.bo = buffer->interlaced;
>> +   mt1->base.domain = NOUVEAU_BO_VRAM;
>> +   mt1->base.offset = mt0->layer_stride * 2;
>> +   mt1->base.address = buffer->interlaced->offset + mt0->layer_stride * 2;
> Similar
>    mt1->base.address = buffer->interlaced->offset + mt1->base.offset;

Yep.

>
>> +   nouveau_bo_ref(buffer->interlaced, &empty);
>> +
>> +   memset(&sv_templ, 0, sizeof(sv_templ));
>> +   for (component = 0, i = 0; i < 2; ++i ) {
>    for (component = 0, i = 0; i < buffer->num_planes; ++i ) {
>
>> +      struct pipe_resource *res = buffer->resources[i];
>> +      unsigned nr_components = util_format_get_nr_components(res->format);
>> +
>> +      u_sampler_view_default_template(&sv_templ, res, res->format);
>> +      buffer->sampler_view_planes[i] = pipe->create_sampler_view(pipe, res, &sv_templ);
>> +      if (!buffer->sampler_view_planes[i])
>> +         goto error;
>> +
>> +      for (j = 0; j < nr_components; ++j, ++component) {
>> +         sv_templ.swizzle_r = sv_templ.swizzle_g = sv_templ.swizzle_b = PIPE_SWIZZLE_RED + j;
>> +         sv_templ.swizzle_a = PIPE_SWIZZLE_ONE;
>> +
>> +         buffer->sampler_view_components[component] = pipe->create_sampler_view(pipe, res, &sv_templ);
>> +         if (!buffer->sampler_view_components[component])
>> +            goto error;
>> +      }
>> +   }
>> +
>> +   memset(&surf_templ, 0, sizeof(surf_templ));
>> +   for (j = 0; j < 2; ++j) {
>    for (j = 0; j < buffer->num_planes; ++j) {
>
>> +      surf_templ.format = buffer->resources[j]->format;
>> +      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 0;
>> +      buffer->surfaces[j * 2] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
>> +      if (!buffer->surfaces[j * 2])
>> +         goto error;
>> +
>> +      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 1;
>> +      buffer->surfaces[j * 2 + 1] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
>> +      if (!buffer->surfaces[j * 2 + 1])
>> +         goto error;
>> +   }
>> +
>> +   return &buffer->base;
>> +
>> +error:
>> +   nv84_video_buffer_destroy(&buffer->base);
>> +   return NULL;
>> +}
>> +
>> +int
>> +nv84_screen_get_video_param(struct pipe_screen *pscreen,
>> +                            enum pipe_video_profile profile,
>> +                            enum pipe_video_cap param)
>> +{
>> +   switch (param) {
>> +   case PIPE_VIDEO_CAP_SUPPORTED:
>> +      return u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC ||
>> +         u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
>       switch (u_reduce_video_profile(profile)) {
>       case PIPE_VIDEO_CODEC_MPEG12:
>       case PIPE_VIDEO_CODEC_MPEG4_AVC:
>          return true;
>       case PIPE_VIDEO_CODEC_VC1:
>          /* TODO: Hardware is capable of IDCT acceleration for VC1*/
>       case PIPE_VIDEO_CODEC_MPEG4:
>       default:
>          return false;
>       }

Why? switch inside a switch is a bit ugly.

>
>> +   case PIPE_VIDEO_CAP_NPOT_TEXTURES:
>> +      return 1;
>> +   case PIPE_VIDEO_CAP_MAX_WIDTH:
>> +   case PIPE_VIDEO_CAP_MAX_HEIGHT:
>> +      return 2048;
>> +   case PIPE_VIDEO_CAP_PREFERED_FORMAT:
>> +      return PIPE_FORMAT_NV12;
>> +   case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
>> +   case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
>> +      return true;
>> +   case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
>> +      return false;
>> +   default:
>> +      debug_printf("unknown video param: %d\n", param);
>> +      return 0;
>> +   }
>> +}
>> +
>> +boolean
>> +nv84_screen_video_supported(struct pipe_screen *screen,
>> +                            enum pipe_format format,
>> +                            enum pipe_video_profile profile)
>> +{
>> +   return format == PIPE_FORMAT_NV12;
> Will this work when we have XVMC_VL set ?

Hm, well, XVMC_VL totally doesn't work for me... crashes in the shader
somewhere. I just copied that logic from nvc0... If anything this
would be too restrictive...

>
>> +}
>> diff --git a/src/gallium/drivers/nv50/nv84_video.h b/src/gallium/drivers/nv50/nv84_video.h
>> new file mode 100644
>> index 0000000..4ff8cf3
>> --- /dev/null
>> +++ b/src/gallium/drivers/nv50/nv84_video.h
> ...
>> +struct nv84_video_buffer {
>> +   struct pipe_video_buffer base;
>    unsigned num_planes;
>
>> +   struct pipe_resource *resources[VL_NUM_COMPONENTS];
>> +   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
>> +   struct pipe_sampler_view *sampler_view_components[VL_NUM_COMPONENTS];
>> +   struct pipe_surface *surfaces[VL_NUM_COMPONENTS * 2];
>> +
>> +   struct nouveau_bo *interlaced, *full;
>> +   int mvidx;
>> +   unsigned frame_num, frame_num_max;
>> +};
>> +
>
> Looking at the params associated with each video engine, I was wondering
> about compacting it into a struct (names chosen are the first thing that
> came to mind)
>
> struct nv84_decoder_eng {
>    struct nouveau_object *obj;
>    struct nouveau_object *channel;
>    struct nouveau_pushbuf *pushbuf;
>    struct nouveau_bufctx *bufctx;
>
>    struct nouveau_bo *fw;
>    struct nouveau_bo *data;
> }
>
> and then having an enum for the different engine types
>
> enum nv84_decoder_eng_type
> {
>    BSP = 0,
>    VP
> };
>
> #define NV84_DECODER_ENG_NUM VP + 1

Hmmm.... it's a point... the downside is that things become a little
more confusing. I think there's some clarity to just having variable
names like "bsp_foo" and "vp_foo" instead of "engines[VP].foo". And
there are only 2 engines.

One advantage of your approach is that it'd save on some
initialization code. I dunno. If someone else likes this approach too,
I'll try it out.

>
>> +struct nv84_decoder {
>> +   struct pipe_video_decoder base;
>> +   struct nouveau_client *client;
>> +   struct nouveau_object *bsp_channel, *vp_channel, *bsp, *vp;
>> +   struct nouveau_pushbuf *bsp_pushbuf, *vp_pushbuf;
>> +   struct nouveau_bufctx *bsp_bufctx, *vp_bufctx;
> Then the struct will look a bit cleaner
> struct nv84_decoder {
>    struct pipe_video_decoder base;
>    struct nouveau_client *client;
>    struct nv84_decoder_eng engs[NV84_DECODER_ENG_NUM];
>
>
>> +
>> +   struct nouveau_bo *bsp_fw, *bsp_data;
>> +   struct nouveau_bo *vp_fw, *vp_data;
>> +   struct nouveau_bo *mbring, *vpring;
>> +
>> +   /*
>> +    * states:
>> +    *  0: init
>> +    *  1: vpring/mbring cleared, bsp is ready
>> +    *  2: bsp is done, vp is ready
>> +    * and then vp it back to 1
>> +    */
>> +   struct nouveau_bo *fence;
>> +
>> +   struct nouveau_bo *bitstream;
>> +   struct nouveau_bo *vp_params;
>> +
>> +   size_t vp_fw2_offset;
>> +
>> +   unsigned frame_mbs, frame_size;
>> +   /* VPRING layout:
>> +        RESIDUAL
>> +        CTRL
>> +        DEBLOCK
>> +        0x1000
>> +   */
>> +   unsigned vpring_deblock, vpring_residual, vpring_ctrl;
>> +
>> +
>> +   struct vl_mpg12_bs *mpeg12_bs;
>> +
>> +   struct nouveau_bo *mpeg12_bo;
>> +   void *mpeg12_mb_info;
>> +   uint16_t *mpeg12_data;
>> +   const int *zscan;
>> +   uint8_t mpeg12_intra_matrix[64];
>> +   uint8_t mpeg12_non_intra_matrix[64];
>> +};
>> +
> ...
>
>> +static INLINE uint32_t mb(uint32_t coord)
>> +{
>> +   return (coord + 0xf)>>4;
>> +}
>> +
>> +static INLINE uint32_t mb_half(uint32_t coord)
>> +{
>> +   return (coord + 0x1f)>>5;
>> +}
> How about moving these in nouveau_video.h ? (and removing the duplicate
> copy from nvc0_video.h)
>
> Might be better as a follow on patch

Yeah. And as VP3/4 support comes out, I suspect a lot of nvc0_video
will get refactored out since they're likely very similar.

>
> ...
>> diff --git a/src/gallium/drivers/nv50/nv84_video_vp.c b/src/gallium/drivers/nv50/nv84_video_vp.c
>> new file mode 100644
>> index 0000000..60c0848
>> --- /dev/null
>> +++ b/src/gallium/drivers/nv50/nv84_video_vp.c
>> @@ -0,0 +1,521 @@
>> +/*
>> + * Copyright 2013 Ilia Mirkin
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a
>> + * copy of this software and associated documentation files (the "Software"),
>> + * to deal in the Software without restriction, including without limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>> + * OTHER DEALINGS IN THE SOFTWARE.
>> + */
>> +
>> +#include <immintrin.h>
> Wrap this include in
> #ifdef __SSE4_2__
> #endif
>
> considering its definitions are used in a similar block
> ...

I guess. There shouldn't be any harm to including it, but OK.

>
>> +void
>> +nv84_decoder_vp_h264(struct nv84_decoder *dec,
>> +                     struct pipe_h264_picture_desc *desc,
>> +                     struct nv84_video_buffer *dest)
>> +{
> ...
>
>> +   for (i = 0; i < 2; i++) {
>    for (i = 0; i < dest->num_planes; i++) {
>
>> +      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
>> +      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
>> +   }
>> +
>> +   PUSH_KICK (push);
>> +}
>> +
> ...
>
>> +void
>> +nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
>> +                          struct pipe_mpeg12_picture_desc *desc,
>> +                          const struct pipe_mpeg12_macroblock *macrob)
>> +{
> ...
>
>> +#ifdef __SSE4_2__
> IMHO this may produce non portable binaries in case of aggressive
> mtune/march flags. I'm not objecting against it just pointing out

mtune shouldn't cause it to be defined. If you're specifying it via
march, then that's the target arch, so the compiler is free to use
corei7 instructions anyways. Although distros will (likely) never use
this, people on, say, gentoo, will get the benefits. A much better
approach would be to build the code anyways, and then select the best
version at runtime.

$ gcc -dM -E - < /dev/null | grep SSE4
$ gcc -mtune=corei7 -dM -E - < /dev/null | grep SSE4
$ gcc -march=corei7 -dM -E - < /dev/null | grep SSE4
#define __SSE4_1__ 1
#define __SSE4_2__ 1


>
>> +void
>> +nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
>> +                       struct pipe_mpeg12_picture_desc *desc,
>> +                       struct nv84_video_buffer *dest)
>> +{
> ...
>
>> +   for (i = 0; i < 2; i++) {
>    for (i = 0; i < dest->num_planes; i++) {
>
>
> Cheers
> Emil
>
>> +      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
>> +      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
>> +   }
>> +   PUSH_KICK (push);
>> +}
>>
>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Nouveau] [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
  2013-06-29 20:21   ` Ilia Mirkin
@ 2013-06-30  0:33     ` Emil Velikov
  2013-06-30  1:02       ` Ilia Mirkin
  0 siblings, 1 reply; 7+ messages in thread
From: Emil Velikov @ 2013-06-30  0:33 UTC (permalink / raw)
  To: Ilia Mirkin; +Cc: nouveau, Maarten Lankhorst, Emil Velikov, mesa-dev

On 29/06/13 21:21, Ilia Mirkin wrote:
> On Sat, Jun 29, 2013 at 2:07 PM, Emil Velikov <emil.l.velikov@gmail.com> wrote:
>> Hi Ilia,
>>
>> On 27/06/13 12:26, Ilia Mirkin wrote:
>>> Adds H.264 and MPEG2 codec support via VP2, using firmware from the
>>> blob. Acceleration is supported at the bitstream level for H.264 and
>>> IDCT level for MPEG2.
>>>
>>> Known issues:
>>>  - H.264 interlaced doesn't render properly
>>>  - H.264 shows very occasional artifacts on a small fraction of videos
>>>  - MPEG2 + VDPAU shows frequent but small artifacts, which aren't there
>>>    when using XvMC on the same videos
>>>
>>> Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
>>
>>
>> Big thanks for working on this
>>
>> I believe the hardware is capable of accelerating IDCT for VC1. Do you
>> have any plans for it ?
> 
> Nope. With MPEG, there's XvMC, which sends the macroblocks directly.
> And there's an existing MPEG parser in gallium which can be used for
> VDPAU. With VC-1, no such thing as XvMC. Which means that I'd have to
> implement a bitstream parser in gallium. The spec is closed, and I'm
> not at all familiar with the concepts used in VC-1.
I thought that gallium already has a vc1 bitstream parser. Anyway if
anyone is interested the libavcodec project can be useful in that regard

> 
>>
>> As far as I know mesa in general is keen on keeping trailing statements
>> on the next line, as well as 78(80) characters line length.
> 
> I tried to do that... in the places where I violate that I felt that
> it enhanced readability. Feel free to point out places where you
> disagree. Also if it's a hard policy (e.g. as in the linux kernel), I
> can go through and reformat.
There is a document describing the Mesa Coding Style[1], although
enforcing it is another question

My main point was statements such as

   if (ret) return NULL

which are rather hard to see when surrounded by compact code.

[1] http://www.mesa3d.org/devinfo.html
> 
>>
>>> ---
>>>
>>> I did try to work out the known issues above, but so far to no avail.
>>>
>>> The kernel support for these engines is not in mainline yet, but it's likely
>>> going to appear in 3.11. I figured that there would be a bunch of feedback so
>>> I might as well send it out early.
>>>
>>> I played around a lot with XvMC performance, the mb function shows up as #1 in
>>> the profiles, with the SSE4.2 optimizations I put in, it drops to #2.
>>> Something clever can likely be done to improve VDPAU performance as well,
>>> e.g. using SSE to do the inverse quantization operations, but I've left that
>> Curious how many machines with vp2 card have a sse4.2 capable CPU? Mine
>> is only sse4.1 ;(
> 
> At least one (mine). SSE4.2 appeared on Core i7 (Nehalem), AFAIK.
> Other optimizations may be possible with just SSE2, but I couldn't
> think of anything particularly clever. Those PCMPESTRM instructions
> are _really_ useful to skip over the useless 0's. I guess something
> could be done with PCMPEQW and PMOVMSKB (wish there were a
> PMOVMSKW...). I'll play with it. But the plain-C loop isn't so bad
> either.
> 
>>
>>> out. (It gets tricky because a lot of the data is 0's, so it's unclear whether
>>> it's faster to use SSE to do operations on everything or one-at-a-time on the
>>> non-0's like I have it now.) Even with these, XvMC ends up ~20% faster than
>>> plain CPU decoding, and likely that percent improves on older CPUs that can't
>>> decode MPEG2 quite as quickly. VDPAU provides further improvements (likely
>>> because it is able to skip mb's while XvMC can't), but there are artifacts for
>>> reasons unknown.
>>>
>>> Note that in order to get XvMC to work, you need my previous patch (or
>>> something similar) since otherwise libXvMCnouveau can't be dlopen'd:
>>> http://lists.freedesktop.org/archives/mesa-dev/2013-June/040949.html
>>>
>>> If you want to test it out, the kernel patch:
>>> http://lists.freedesktop.org/archives/nouveau/2013-June/012821.html
>>>
>>> Firmware:
>>> https://github.com/imirkin/re-vp2/blob/master/extract_firmware.py
>>>
>>>  src/gallium/drivers/nv50/Makefile.sources |   5 +-
>>>  src/gallium/drivers/nv50/nv50_context.c   |  13 +-
>>>  src/gallium/drivers/nv50/nv50_context.h   |  24 +
>>>  src/gallium/drivers/nv50/nv50_miptree.c   |  27 ++
>>>  src/gallium/drivers/nv50/nv50_resource.h  |   1 +
>>>  src/gallium/drivers/nv50/nv50_screen.c    |  13 +-
>>>  src/gallium/drivers/nv50/nv50_winsys.h    |   4 +
>>>  src/gallium/drivers/nv50/nv84_video.c     | 778 ++++++++++++++++++++++++++++++
>>>  src/gallium/drivers/nv50/nv84_video.h     | 134 +++++
>>>  src/gallium/drivers/nv50/nv84_video_bsp.c | 251 ++++++++++
>>>  src/gallium/drivers/nv50/nv84_video_vp.c  | 521 ++++++++++++++++++++
>>>  11 files changed, 1768 insertions(+), 3 deletions(-)
>>>  create mode 100644 src/gallium/drivers/nv50/nv84_video.c
>>>  create mode 100644 src/gallium/drivers/nv50/nv84_video.h
>>>  create mode 100644 src/gallium/drivers/nv50/nv84_video_bsp.c
>>>  create mode 100644 src/gallium/drivers/nv50/nv84_video_vp.c
>>>
>> ...
>>> diff --git a/src/gallium/drivers/nv50/nv84_video.c b/src/gallium/drivers/nv50/nv84_video.c
>>> new file mode 100644
>>> index 0000000..064178c
>>> --- /dev/null
>>> +++ b/src/gallium/drivers/nv50/nv84_video.c
>>> @@ -0,0 +1,778 @@
>> ...
>>> +static int
>>> +nv84_copy_firmware(const char *path, void *dest, size_t len)
>>    ssize_t len
>> To prevent signed/unsigned issues in conditional below
>>
>>> +{
>>> +   int fd = open(path, O_RDONLY | O_CLOEXEC);
>>> +   ssize_t r;
>>> +   if (fd < 0) {
>>> +      fprintf(stderr, "opening firmware file %s failed: %m\n", path);
>>> +      return 1;
>>> +   }
>>> +   r = read(fd, dest, len);
>>> +   close(fd);
>>> +
>>> +   if (r != len) {
>> Here        ^^
> 
> Yes, in case that len is 2^64 - 1... but I'll change it :)
> 
>>
>>> +      fprintf(stderr, "reading firmware file %s failed: %m\n", path);
>>> +      return 1;
>>> +   }
>>> +
>>> +   return 0;
>>> +}
>>> +
>> ...
>>> +static void
>>> +nv84_decoder_decode_bitstream_mpeg12(struct pipe_video_decoder *decoder,
>>> +                                     struct pipe_video_buffer *video_target,
>>> +                                     struct pipe_picture_desc *picture,
>>> +                                     unsigned num_buffers,
>>> +                                     const void *const *data,
>>> +                                     const unsigned *num_bytes)
>>> +{
>>> +   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
>>> +   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
>>> +
>>> +   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
>>> +
>>> +   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
>> This can be written as
>>    assert(video_target->buffer_format == PIPE_FORMAT_NV12);
> 
> Good point. The code was a bit different at an earlier point.
> 
>>
>>> +
>>> +   vl_mpg12_bs_decode(dec->mpeg12_bs,
>>> +                      video_target,
>>> +                      desc,
>>> +                      num_buffers,
>>> +                      data,
>>> +                      num_bytes);
>> And then the temporary variables can be removed, as you've done in
>> nv84_decoder_end_frame_mpeg12()
>>
>>> +}
>>> +
>> ...
>>> +
>>> +struct pipe_video_decoder *
>>> +nv84_create_decoder(struct pipe_context *context,
>>> +                    enum pipe_video_profile profile,
>>> +                    enum pipe_video_entrypoint entrypoint,
>>> +                    enum pipe_video_chroma_format chroma_format,
>>> +                    unsigned width, unsigned height,
>>> +                    unsigned max_references,
>>> +                    bool chunked_decode)
>>> +{
>>> +   struct nv50_context *nv50 = (struct nv50_context *)context;
>>> +   struct nouveau_screen *screen = &nv50->screen->base;
>>> +   struct nv84_decoder *dec;
>>> +   struct nouveau_pushbuf *bsp_push, *vp_push;
>>> +   struct nv50_surface surf;
>>> +   struct nv50_miptree mip;
>>> +   union pipe_color_union color;
>>> +   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
>>> +   int ret, i;
>>> +   int is_h264 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC;
>>> +   int is_mpeg12 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
>>> +   struct nouveau_pushbuf_refn fence_ref[] = {
>>> +      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
>>> +   };
>>> +
>>> +
>>> +   if (getenv("XVMC_VL"))
>>> +      return vl_create_decoder(context, profile, entrypoint,
>>> +                               chroma_format, width, height,
>>> +                               max_references, chunked_decode);
>>> +
>>> +   if ((is_h264 && entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
>>> +       (is_mpeg12 && entrypoint > PIPE_VIDEO_ENTRYPOINT_IDCT)) {
>>> +      debug_printf("%x\n", entrypoint);
>>> +      return NULL;
>>> +   }
>>> +
>>> +   if (!is_h264 && !is_mpeg12) {
>>> +      debug_printf("invalid profile: %x\n", profile);
>>> +      return NULL;
>>> +   }
>>> +
>>> +   dec = CALLOC_STRUCT(nv84_decoder);
>>> +   if (!dec) return NULL;
>>> +
>>> +   dec->base.context = context;
>>> +   dec->base.profile = profile;
>>> +   dec->base.entrypoint = entrypoint;
>>> +   dec->base.chroma_format = chroma_format;
>>> +   dec->base.width = width;
>>> +   dec->base.height = height;
>>> +   dec->base.max_references = max_references;
>>> +   dec->base.destroy = nv84_decoder_destroy;
>>> +   dec->base.flush = nv84_decoder_flush;
>>> +   if (is_h264) {
>>> +      dec->base.decode_bitstream = nv84_decoder_decode_bitstream_h264;
>>> +      dec->base.begin_frame = nv84_decoder_begin_frame_h264;
>>> +      dec->base.end_frame = nv84_decoder_end_frame_h264;
>>> +
>>> +      dec->frame_mbs = mb(dec->base.width) * mb_half(dec->base.height) * 2;
>>> +      dec->frame_size = dec->frame_mbs << 8;
>>> +      dec->vpring_deblock = align(0x30 * dec->frame_mbs, 0x100);
>>> +      dec->vpring_residual = 0x2000 + MAX2(0x32000, 0x600 * dec->frame_mbs);
>>> +      dec->vpring_ctrl = MAX2(0x10000, align(0x1080 + 0x144 * dec->frame_mbs, 0x100));
>>> +   } else if (is_mpeg12) {
>>> +      dec->base.decode_macroblock = nv84_decoder_decode_macroblock;
>>> +      dec->base.begin_frame = nv84_decoder_begin_frame_mpeg12;
>>> +      dec->base.end_frame = nv84_decoder_end_frame_mpeg12;
>>> +
>>> +      if (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
>>> +         dec->mpeg12_bs = CALLOC_STRUCT(vl_mpg12_bs);
>>> +         if (!dec->mpeg12_bs)
>>> +            goto fail;
>>> +         vl_mpg12_bs_init(dec->mpeg12_bs, &dec->base);
>>> +         dec->base.decode_bitstream = nv84_decoder_decode_bitstream_mpeg12;
>>> +      }
>>> +   } else {
>>> +      goto fail;
>> Seems to be handled already by - if (!is_h264 && !is_mpeg12)...
> 
> Yes. But I disliked relying on that, so I decided to leave it in.
> 
>>
>>> +   }
>>> +
>>> +   ret = nouveau_client_new(screen->device, &dec->client);
>>> +   if (ret)
>>> +      goto fail;
>> Is there any particular reason for using a variable to store the return
>> value through this functions? Me thinks it can be safely purged, making
>> the code a bit cleaner
> 
> It was like that in nvc0_video and a lot of other code. I think the
> code looks a little odd when you have
> 
> if (super-long multi-line statement)
>   goto fail
There are cases where you've left the "super-long multi-line statement"
in the conditional and others where you did not. I suggested changing
the one that may produce smaller code and is more appealing imho

> 
>>
>>> +
>>> +   if (is_h264) {
>>> +      ret = nouveau_object_new(&screen->device->object, 0,
>>> +                               NOUVEAU_FIFO_CHANNEL_CLASS,
>>> +                               &nv04_data, sizeof(nv04_data), &dec->bsp_channel);
>> ...
>>
>>> +   if (is_h264) {
>>> +      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
>>> +       * of doing this... perhaps makes sense to just copy the relevant logic
>>> +       * here. */
>>> +      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
>>> +      surf.offset = dec->frame_size;
>>> +      surf.width = 64;
>>> +      surf.height = (max_references + 1) * dec->frame_mbs / 4;
>>> +      surf.depth = 1;
>>> +      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
>>> +      surf.base.u.tex.level = 0;
>>> +      surf.base.texture = &mip.base.base;
>>> +      mip.level[0].tile_mode = 0;
>>> +      mip.level[0].pitch = surf.width * 4;
>>> +      mip.base.domain = NOUVEAU_BO_VRAM;
>>> +      mip.base.bo = dec->mbring;
>>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 64, 4760);
You can drop the typecast here
s/(struct pipe_surface *)&surf/&surf.base/

>>> +      surf.offset = dec->vpring->size / 2 - 0x1000;
>>> +      surf.width = 1024;
>>> +      surf.height = 1;
>>> +      mip.level[0].pitch = surf.width * 4;
>>> +      mip.base.bo = dec->vpring;
>>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
Ditto

>>> +      surf.offset = dec->vpring->size - 0x1000;
>>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
Ditto

>>> +
>>> +      PUSH_SPACE(screen->pushbuf, 5);
>>> +      fence_ref[0].bo = dec->fence;
>>> +      nouveau_pushbuf_refn(screen->pushbuf, fence_ref, 1);
>>> +      /* The clear_render_target is done via 3D engine, so use it to write to a
>>> +       * semaphore to indicate that it's done.
>>> +       */
>>> +      BEGIN_NV04(screen->pushbuf, SUBC_3D(0x1b00), 4);
>>> +      PUSH_DATAh(screen->pushbuf, dec->fence->offset);
>>> +      PUSH_DATA (screen->pushbuf, dec->fence->offset);
>>> +      PUSH_DATA (screen->pushbuf, 1);
>>> +      PUSH_DATA (screen->pushbuf, 0xf010);
>>> +      PUSH_KICK (screen->pushbuf);
>>> +
>>> +      PUSH_SPACE(bsp_push, 2 + 12 + 2 + 4 + 3);
>>> +
>>> +      BEGIN_NV04(bsp_push, SUBC_BSP(NV01_SUBCHAN_OBJECT), 1);
>>> +      PUSH_DATA (bsp_push, dec->bsp->handle);
>>> +
>>> +      BEGIN_NV04(bsp_push, SUBC_BSP(0x180), 11);
>>> +      for (i = 0; i < 11; i++)
>> Any idea where 11 comes from ? Is it related to some other parameter ?
> 
> These are the dma indices (e.g. when i use 0x654321, each nibble is a
> dmaidx, which is an offset into these 0x180 values). It's the part of
> the whole thing that I understand the least... in theory I should be
> able to just initialize one of them and use it throughout, but I
> haven't been brave enough to put that theory to the test. This is how
> the blob does it.
If it works don't break it ^_^

> 
>>
>>> +         PUSH_DATA(bsp_push, nv04_data.vram);
>>> +      BEGIN_NV04(bsp_push, SUBC_BSP(0x1b8), 1);
>>> +      PUSH_DATA (bsp_push, nv04_data.vram);
>>> +
>> ...
>>
>>> +struct pipe_video_buffer *
>>> +nv84_video_buffer_create(struct pipe_context *pipe,
>>> +                         const struct pipe_video_buffer *template)
>>> +{
>> ...
>>> +   buffer->base.buffer_format = template->buffer_format;
>>> +   buffer->base.context = pipe;
>>> +   buffer->base.destroy = nv84_video_buffer_destroy;
>>> +   buffer->base.chroma_format = template->chroma_format;
>>> +   buffer->base.width = template->width;
>>> +   buffer->base.height = template->height;
>>> +   buffer->base.get_sampler_view_planes = nv84_video_buffer_sampler_view_planes;
>>> +   buffer->base.get_sampler_view_components = nv84_video_buffer_sampler_view_components;
>>> +   buffer->base.get_surfaces = nv84_video_buffer_surfaces;
>>> +   buffer->base.interlaced = true;
>> By storing the number of planes, will be able to demagic some constants
>> later on
>>    buffer->num_planes = 2;
> 
> nvc0_video does that. I find that incredibly confusing. It's always 2,
> and they are of diff size. I prefer my bike-shed color :)
> 
>>
>>> +
>>> +   memset(&templ, 0, sizeof(templ));
>>> +   templ.target = PIPE_TEXTURE_2D_ARRAY;
>>> +   templ.depth0 = 1;
>>> +   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
>>> +   templ.format = PIPE_FORMAT_R8_UNORM;
>>> +   templ.width0 = align(template->width, 2);
>>> +   templ.height0 = align(template->height, 4) / 2;
>>> +   templ.flags = NV50_RESOURCE_FLAG_VIDEO;
>>> +   templ.array_size = 2;
>>> +
>>> +   cfg.nv50.tile_mode = 0x20;
>>> +   cfg.nv50.memtype = 0x70;
>>> +
>>> +   buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
>>> +   if (!buffer->resources[0])
>>> +      goto error;
>>> +
>>> +   templ.format = PIPE_FORMAT_R8G8_UNORM;
>>> +   templ.width0 /= 2;
>>> +   templ.height0 /= 2;
>>> +   buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
>>> +   if (!buffer->resources[1])
>>> +      goto error;
>> I believe that the nvc0 version of the code is easier to read (bikeshed)
>>
>> for (i = 1; i < buffer->num_planes; ++i) {
> 
> num_planes is always 2. for (i = 1; i < 2; i++) seems like a waste...
> 
>>    buffer->resources[i] = pipe->screen->resource_create(pipe->screen,
>> &templ);
>>    if (!buffer->resources[i])
>>       goto error;
>> }
>>
>>> +
>>> +   mt0 = nv50_miptree(buffer->resources[0]);
>>> +   mt1 = nv50_miptree(buffer->resources[1]);
>>> +
>>> +   bo_size = mt0->total_size + mt1->total_size;
>>> +   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
>>> +                      bo_size, &cfg, &buffer->interlaced))
>>> +      goto error;
>>> +   /* XXX Change reference frame management so that this is only allocated in
>>> +    * the decoder when necessary. */
>>> +   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
>>> +                      bo_size, &cfg, &buffer->full))
>>> +      goto error;
>>> +
>>> +   mt0->base.bo = buffer->interlaced;
>>> +   mt0->base.domain = NOUVEAU_BO_VRAM;
>>> +   mt0->base.offset = 0;
>>> +   mt0->base.address = buffer->interlaced->offset;
>> IMHO this looks a bit easier to grasp
>>    mt0->base.address = buffer->interlaced->offset + mt0->base.offset;
> 
> Agreed.
> 
>>
>>> +   nouveau_bo_ref(buffer->interlaced, &empty);
>>> +
>>> +   mt1->base.bo = buffer->interlaced;
>>> +   mt1->base.domain = NOUVEAU_BO_VRAM;
>>> +   mt1->base.offset = mt0->layer_stride * 2;
>>> +   mt1->base.address = buffer->interlaced->offset + mt0->layer_stride * 2;
>> Similar
>>    mt1->base.address = buffer->interlaced->offset + mt1->base.offset;
> 
> Yep.
> 
>>
>>> +   nouveau_bo_ref(buffer->interlaced, &empty);
>>> +
>>> +   memset(&sv_templ, 0, sizeof(sv_templ));
>>> +   for (component = 0, i = 0; i < 2; ++i ) {
>>    for (component = 0, i = 0; i < buffer->num_planes; ++i ) {
>>
>>> +      struct pipe_resource *res = buffer->resources[i];
>>> +      unsigned nr_components = util_format_get_nr_components(res->format);
>>> +
>>> +      u_sampler_view_default_template(&sv_templ, res, res->format);
>>> +      buffer->sampler_view_planes[i] = pipe->create_sampler_view(pipe, res, &sv_templ);
>>> +      if (!buffer->sampler_view_planes[i])
>>> +         goto error;
>>> +
>>> +      for (j = 0; j < nr_components; ++j, ++component) {
>>> +         sv_templ.swizzle_r = sv_templ.swizzle_g = sv_templ.swizzle_b = PIPE_SWIZZLE_RED + j;
>>> +         sv_templ.swizzle_a = PIPE_SWIZZLE_ONE;
>>> +
>>> +         buffer->sampler_view_components[component] = pipe->create_sampler_view(pipe, res, &sv_templ);
>>> +         if (!buffer->sampler_view_components[component])
>>> +            goto error;
>>> +      }
>>> +   }
>>> +
>>> +   memset(&surf_templ, 0, sizeof(surf_templ));
>>> +   for (j = 0; j < 2; ++j) {
>>    for (j = 0; j < buffer->num_planes; ++j) {
>>
>>> +      surf_templ.format = buffer->resources[j]->format;
>>> +      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 0;
>>> +      buffer->surfaces[j * 2] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
>>> +      if (!buffer->surfaces[j * 2])
>>> +         goto error;
>>> +
>>> +      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 1;
>>> +      buffer->surfaces[j * 2 + 1] = pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
>>> +      if (!buffer->surfaces[j * 2 + 1])
>>> +         goto error;
>>> +   }
>>> +
>>> +   return &buffer->base;
>>> +
>>> +error:
>>> +   nv84_video_buffer_destroy(&buffer->base);
>>> +   return NULL;
>>> +}
>>> +
>>> +int
>>> +nv84_screen_get_video_param(struct pipe_screen *pscreen,
>>> +                            enum pipe_video_profile profile,
>>> +                            enum pipe_video_cap param)
>>> +{
>>> +   switch (param) {
>>> +   case PIPE_VIDEO_CAP_SUPPORTED:
>>> +      return u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC ||
>>> +         u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
>>       switch (u_reduce_video_profile(profile)) {
>>       case PIPE_VIDEO_CODEC_MPEG12:
>>       case PIPE_VIDEO_CODEC_MPEG4_AVC:
>>          return true;
>>       case PIPE_VIDEO_CODEC_VC1:
>>          /* TODO: Hardware is capable of IDCT acceleration for VC1*/
>>       case PIPE_VIDEO_CODEC_MPEG4:
>>       default:
>>          return false;
>>       }
> 
> Why? switch inside a switch is a bit ugly.
Few silly reasons
* u_reduce_video_profile() is a nice switch statement
* ^^ is inlined and executed twice
* adds a note about VC1 :P

> 
>>
>>> +   case PIPE_VIDEO_CAP_NPOT_TEXTURES:
>>> +      return 1;
>>> +   case PIPE_VIDEO_CAP_MAX_WIDTH:
>>> +   case PIPE_VIDEO_CAP_MAX_HEIGHT:
>>> +      return 2048;
>>> +   case PIPE_VIDEO_CAP_PREFERED_FORMAT:
>>> +      return PIPE_FORMAT_NV12;
>>> +   case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
>>> +   case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
>>> +      return true;
>>> +   case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
>>> +      return false;
>>> +   default:
>>> +      debug_printf("unknown video param: %d\n", param);
>>> +      return 0;
>>> +   }
>>> +}
>>> +
>>> +boolean
>>> +nv84_screen_video_supported(struct pipe_screen *screen,
>>> +                            enum pipe_format format,
>>> +                            enum pipe_video_profile profile)
>>> +{
>>> +   return format == PIPE_FORMAT_NV12;
>> Will this work when we have XVMC_VL set ?
> 
> Hm, well, XVMC_VL totally doesn't work for me... crashes in the shader
> somewhere. I just copied that logic from nvc0... If anything this
> would be too restrictive...
Hm... true, nouveau is quite restrictive. Radeons are handling it
nicely, with a fallback to vl if uvd is not available. Not that it
matters here :)

> 
>>
>>> +}
>>> diff --git a/src/gallium/drivers/nv50/nv84_video.h b/src/gallium/drivers/nv50/nv84_video.h
>>> new file mode 100644
>>> index 0000000..4ff8cf3
>>> --- /dev/null
>>> +++ b/src/gallium/drivers/nv50/nv84_video.h
>> ...
>>> +struct nv84_video_buffer {
>>> +   struct pipe_video_buffer base;
>>    unsigned num_planes;
>>
>>> +   struct pipe_resource *resources[VL_NUM_COMPONENTS];
>>> +   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
>>> +   struct pipe_sampler_view *sampler_view_components[VL_NUM_COMPONENTS];
>>> +   struct pipe_surface *surfaces[VL_NUM_COMPONENTS * 2];
>>> +
>>> +   struct nouveau_bo *interlaced, *full;
>>> +   int mvidx;
>>> +   unsigned frame_num, frame_num_max;
>>> +};
>>> +
>>
>> Looking at the params associated with each video engine, I was wondering
>> about compacting it into a struct (names chosen are the first thing that
>> came to mind)
>>
>> struct nv84_decoder_eng {
>>    struct nouveau_object *obj;
>>    struct nouveau_object *channel;
>>    struct nouveau_pushbuf *pushbuf;
>>    struct nouveau_bufctx *bufctx;
>>
>>    struct nouveau_bo *fw;
>>    struct nouveau_bo *data;
>> }
>>
>> and then having an enum for the different engine types
>>
>> enum nv84_decoder_eng_type
>> {
>>    BSP = 0,
>>    VP
>> };
>>
>> #define NV84_DECODER_ENG_NUM VP + 1
> 
> Hmmm.... it's a point... the downside is that things become a little
> more confusing. I think there's some clarity to just having variable
> names like "bsp_foo" and "vp_foo" instead of "engines[VP].foo". And
> there are only 2 engines.
When I came up with this I was looking at the nvc0 code, which was less
intuitive

   struct nouveau_object *channel[3], *bsp, *vp, *ppp;
   struct nouveau_pushbuf *pushbuf[3];

> 
> One advantage of your approach is that it'd save on some
> initialization code. I dunno. If someone else likes this approach too,
> I'll try it out.
> 
>>
>>> +struct nv84_decoder {
>>> +   struct pipe_video_decoder base;
>>> +   struct nouveau_client *client;
>>> +   struct nouveau_object *bsp_channel, *vp_channel, *bsp, *vp;
>>> +   struct nouveau_pushbuf *bsp_pushbuf, *vp_pushbuf;
>>> +   struct nouveau_bufctx *bsp_bufctx, *vp_bufctx;
>> Then the struct will look a bit cleaner
>> struct nv84_decoder {
>>    struct pipe_video_decoder base;
>>    struct nouveau_client *client;
>>    struct nv84_decoder_eng engs[NV84_DECODER_ENG_NUM];
>>
>>
>>> +
>>> +   struct nouveau_bo *bsp_fw, *bsp_data;
>>> +   struct nouveau_bo *vp_fw, *vp_data;
>>> +   struct nouveau_bo *mbring, *vpring;
>>> +
>>> +   /*
>>> +    * states:
>>> +    *  0: init
>>> +    *  1: vpring/mbring cleared, bsp is ready
>>> +    *  2: bsp is done, vp is ready
>>> +    * and then vp it back to 1
>>> +    */
>>> +   struct nouveau_bo *fence;
>>> +
>>> +   struct nouveau_bo *bitstream;
>>> +   struct nouveau_bo *vp_params;
>>> +
>>> +   size_t vp_fw2_offset;
>>> +
>>> +   unsigned frame_mbs, frame_size;
>>> +   /* VPRING layout:
>>> +        RESIDUAL
>>> +        CTRL
>>> +        DEBLOCK
>>> +        0x1000
>>> +   */
>>> +   unsigned vpring_deblock, vpring_residual, vpring_ctrl;
>>> +
>>> +
>>> +   struct vl_mpg12_bs *mpeg12_bs;
>>> +
>>> +   struct nouveau_bo *mpeg12_bo;
>>> +   void *mpeg12_mb_info;
>>> +   uint16_t *mpeg12_data;
>>> +   const int *zscan;
>>> +   uint8_t mpeg12_intra_matrix[64];
>>> +   uint8_t mpeg12_non_intra_matrix[64];
>>> +};
>>> +
>> ...
>>
>>> +static INLINE uint32_t mb(uint32_t coord)
>>> +{
>>> +   return (coord + 0xf)>>4;
>>> +}
>>> +
>>> +static INLINE uint32_t mb_half(uint32_t coord)
>>> +{
>>> +   return (coord + 0x1f)>>5;
>>> +}
>> How about moving these in nouveau_video.h ? (and removing the duplicate
>> copy from nvc0_video.h)
>>
>> Might be better as a follow on patch
> 
> Yeah. And as VP3/4 support comes out, I suspect a lot of nvc0_video
> will get refactored out since they're likely very similar.
> 
>>
>> ...
>>> diff --git a/src/gallium/drivers/nv50/nv84_video_vp.c b/src/gallium/drivers/nv50/nv84_video_vp.c
>>> new file mode 100644
>>> index 0000000..60c0848
>>> --- /dev/null
>>> +++ b/src/gallium/drivers/nv50/nv84_video_vp.c
>>> @@ -0,0 +1,521 @@
>>> +/*
>>> + * Copyright 2013 Ilia Mirkin
>>> + *
>>> + * Permission is hereby granted, free of charge, to any person obtaining a
>>> + * copy of this software and associated documentation files (the "Software"),
>>> + * to deal in the Software without restriction, including without limitation
>>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>>> + * and/or sell copies of the Software, and to permit persons to whom the
>>> + * Software is furnished to do so, subject to the following conditions:
>>> + *
>>> + * The above copyright notice and this permission notice shall be included in
>>> + * all copies or substantial portions of the Software.
>>> + *
>>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>>> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
>>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>>> + * OTHER DEALINGS IN THE SOFTWARE.
>>> + */
>>> +
>>> +#include <immintrin.h>
>> Wrap this include in
>> #ifdef __SSE4_2__
>> #endif
>>
>> considering its definitions are used in a similar block
>> ...
> 
> I guess. There shouldn't be any harm to including it, but OK.
> 
>>
>>> +void
>>> +nv84_decoder_vp_h264(struct nv84_decoder *dec,
>>> +                     struct pipe_h264_picture_desc *desc,
>>> +                     struct nv84_video_buffer *dest)
>>> +{
>> ...
>>
>>> +   for (i = 0; i < 2; i++) {
>>    for (i = 0; i < dest->num_planes; i++) {
>>
>>> +      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
>>> +      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
>>> +   }
>>> +
>>> +   PUSH_KICK (push);
>>> +}
>>> +
>> ...
>>
>>> +void
>>> +nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
>>> +                          struct pipe_mpeg12_picture_desc *desc,
>>> +                          const struct pipe_mpeg12_macroblock *macrob)
>>> +{
>> ...
>>
>>> +#ifdef __SSE4_2__
>> IMHO this may produce non portable binaries in case of aggressive
>> mtune/march flags. I'm not objecting against it just pointing out
> 
> mtune shouldn't cause it to be defined. if you're specifying it on
> march, then that's the target arch, so... the compiler is free to use
> corei7 instructions anyways. Although distros will (likely) never use
> this, but people on, say, gentoo, will get the benefits. A much better
> approach would be to build the code anyways, and then select the best
> version at runtime.
Ouch you got me there, I was not too sure which one was the correct gcc
flag. AFAIK mesa does have code detecting runtime cpu capabilities, but
that can follow on if anyone is interested

Cheers
Emil
> 
> $ gcc -dM -E - < /dev/null | grep SSE4
> $ gcc -mtune=corei7 -dM -E - < /dev/null | grep SSE4
> $ gcc -march=corei7 -dM -E - < /dev/null | grep SSE4
> #define __SSE4_1__ 1
> #define __SSE4_2__ 1
> 
> 
>>
>>> +void
>>> +nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
>>> +                       struct pipe_mpeg12_picture_desc *desc,
>>> +                       struct nv84_video_buffer *dest)
>>> +{
>> ...
>>
>>> +   for (i = 0; i < 2; i++) {
>>    for (i = 0; i < dest->num_planes; i++) {
>>
>>
>> Cheers
>> Emil
>>
>>> +      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
>>> +      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
>>> +   }
>>> +   PUSH_KICK (push);
>>> +}
>>>
>>

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [Nouveau] [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
  2013-06-30  0:33     ` Emil Velikov
@ 2013-06-30  1:02       ` Ilia Mirkin
  0 siblings, 0 replies; 7+ messages in thread
From: Ilia Mirkin @ 2013-06-30  1:02 UTC (permalink / raw)
  To: Emil Velikov; +Cc: mesa-dev, Maarten Lankhorst, nouveau

On Sat, Jun 29, 2013 at 8:33 PM, Emil Velikov <emil.l.velikov@gmail.com> wrote:
> On 29/06/13 21:21, Ilia Mirkin wrote:
>> On Sat, Jun 29, 2013 at 2:07 PM, Emil Velikov <emil.l.velikov@gmail.com> wrote:
>>> I believe the hardware is capable of accelerating IDCT for VC1. Do you
>>> have any plans for it ?
>>
>> Nope. With MPEG, there's XvMC, which sends the macroblocks directly.
>> And there's an existing MPEG parser in gallium which can be used for
>> VDPAU. With VC-1, no such thing as XvMC. Which means that I'd have to
>> implement a bitstream parser in gallium. The spec is closed, and I'm
>> not at all familiar with the concepts used in VC-1.
> I thought that gallium already has a vc1 bitstream parser. Anyway if
> anyone is interested the libavcodec project can be useful in that regard

libavcodec is LGPL (incompatible with the mesa license for copying
code) and doesn't have a nice API to extract the macroblock data.

>>> As far as I know mesa in general is keen on keeping trailing statements
>>> on the next line, as well as 78(80) characters line length.
>>
>> I tried to do that... in the places where I violate that I felt that
>> it enhanced readability. Feel free to point out places where you
>> disagree. Also if it's a hard policy (e.g. as in the linux kernel), I
>> can go through and reformat.
> There is a document describing the Mesa Coding Style[1], although
> enforcing it is another question
>
> My main point was statements such as
>
>    if (ret) return NULL
>
> which are rather hard to see when surrounded by compact code.
>
> [1] http://www.mesa3d.org/devinfo.html

Oh, I see what you mean by "trailing statements" now. [I guess that's
really the only interpretation, not sure why it didn't click before.]
I'll go through and clean that up. I actually usually prefer that same
policy too, except for super-short trailing statements.

>>>> +   if (is_h264) {
>>>> +      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
>>>> +       * of doing this... perhaps makes sense to just copy the relevant logic
>>>> +       * here. */
>>>> +      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
>>>> +      surf.offset = dec->frame_size;
>>>> +      surf.width = 64;
>>>> +      surf.height = (max_references + 1) * dec->frame_mbs / 4;
>>>> +      surf.depth = 1;
>>>> +      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
>>>> +      surf.base.u.tex.level = 0;
>>>> +      surf.base.texture = &mip.base.base;
>>>> +      mip.level[0].tile_mode = 0;
>>>> +      mip.level[0].pitch = surf.width * 4;
>>>> +      mip.base.domain = NOUVEAU_BO_VRAM;
>>>> +      mip.base.bo = dec->mbring;
>>>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 64, 4760);
> You can drop the typecast here
> s/(struct pipe_surface *)&surf/&surf.base/
>
>>>> +      surf.offset = dec->vpring->size / 2 - 0x1000;
>>>> +      surf.width = 1024;
>>>> +      surf.height = 1;
>>>> +      mip.level[0].pitch = surf.width * 4;
>>>> +      mip.base.bo = dec->vpring;
>>>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
> Ditto
>
>>>> +      surf.offset = dec->vpring->size - 0x1000;
>>>> +      context->clear_render_target(context, (struct pipe_surface *)&surf, &color, 0, 0, 1024, 1);
> Ditto

Nice catches. I hate this code... all this setup effort just to zero
some stuff out. =/

>>> Looking at the params associated with each video engine, I was wondering
>>> about compacting it into a struct (names chosen are the first thing that
>>> came to mind)
>>>
>>> struct nv84_decoder_eng {
>>>    struct nouveau_object *obj;
>>>    struct nouveau_object *channel;
>>>    struct nouveau_pushbuf *pushbuf;
>>>    struct nouveau_bufctx *bufctx;
>>>
>>>    struct nouveau_bo *fw;
>>>    struct nouveau_bo *data;
>>> }
>>>
>>> and then having an enum for the different engine types
>>>
>>> enum nv84_decoder_eng_type
>>> {
>>>    BSP = 0,
>>>    VP
>>> };
>>>
>>> #define NV84_DECODER_ENG_NUM VP + 1
>>
>> Hmmm.... it's a point... the downside is that things become a little
>> more confusing. I think there's some clarity to just having variable
>> names like "bsp_foo" and "vp_foo" instead of "engines[VP].foo". And
>> there are only 2 engines.
> When I came up with this I was looking at the nvc0 code, which was less
> intuitive
>
>    struct nouveau_object *channel[3], *bsp, *vp, *ppp;
>    struct nouveau_pushbuf *pushbuf[3];
>

Next try to work out how those are populated :) For pre-kepler all the
pushbufs are the same, for kepler they are different. It was suggested
to me that it would be advantageous to keep BSP and VP on separate
channels, and also the blob does it, so I did it that way. I don't
fully understand how the hardware interprets these things, so...
whatever.

>>>> +#ifdef __SSE4_2__
>>> IMHO this may produce non portable binaries in case of aggressive
>>> mtune/march flags. I'm not objecting against it just pointing out
>>
>> mtune shouldn't cause it to be defined. if you're specifying it on
>> march, then that's the target arch, so... the compiler is free to use
>> corei7 instructions anyways. Although distros will (likely) never use
>> this, but people on, say, gentoo, will get the benefits. A much better
>> approach would be to build the code anyways, and then select the best
>> version at runtime.
> Ouch you got me there, I was not too sure which one was the correct gcc
> flag. AFAIK mesa does have code detecting runtime cpu capabilities, but
> that can follow on if anyone is interested

Well, as luck would have it, I've redone it as SSE2 and it's faster
(more instructions, but fewer cycles). So I'm just going to replace it
with SSE2, which is on by default for x86_64. I didn't see any runtime
feature detection logic in gallium, only compile-time (e.g. in
pipe/p_config.h and util/u_sse.h) but I'm very new to the mesa
codebase, so I could have missed it. And sadly __builtin_cpu_supports
is probably too new to use.

Thanks for all the feedback!

  -ilia

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v2] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
       [not found] ` <1372332404-16547-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
@ 2013-06-30  5:17   ` Ilia Mirkin
       [not found]     ` <1372569420-18489-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
  0 siblings, 1 reply; 7+ messages in thread
From: Ilia Mirkin @ 2013-06-30  5:17 UTC (permalink / raw)
  To: Maarten Lankhorst
  Cc: mesa-dev-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Adds H.264 and MPEG2 codec support via VP2, using firmware from the
blob. Acceleration is supported at the bitstream level for H.264 and
IDCT level for MPEG2.

Known issues:
 - H.264 interlaced doesn't render properly
 - H.264 shows very occasional artifacts on a small fraction of videos
 - MPEG2 + VDPAU shows frequent but small artifacts, which aren't there
   when using XvMC on the same videos

Signed-off-by: Ilia Mirkin <imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
---

v1 -> v2:

 - Replaced SSE4.2 code with SSE2 code -- it's a little faster, and works on
   all machines likely to end up with one of these cards.

 - Style cleanups suggested by Emil Velikov. Emil, let me know if there were
   things that I said I'd do but I forgot about (or if there are more things
   that you notice).

 src/gallium/drivers/nv50/Makefile.sources |   5 +-
 src/gallium/drivers/nv50/nv50_context.c   |  13 +-
 src/gallium/drivers/nv50/nv50_context.h   |  24 +
 src/gallium/drivers/nv50/nv50_miptree.c   |  27 +
 src/gallium/drivers/nv50/nv50_resource.h  |   1 +
 src/gallium/drivers/nv50/nv50_screen.c    |  13 +-
 src/gallium/drivers/nv50/nv50_winsys.h    |   4 +
 src/gallium/drivers/nv50/nv84_video.c     | 793 ++++++++++++++++++++++++++++++
 src/gallium/drivers/nv50/nv84_video.h     | 134 +++++
 src/gallium/drivers/nv50/nv84_video_bsp.c | 251 ++++++++++
 src/gallium/drivers/nv50/nv84_video_vp.c  | 547 +++++++++++++++++++++
 11 files changed, 1809 insertions(+), 3 deletions(-)
 create mode 100644 src/gallium/drivers/nv50/nv84_video.c
 create mode 100644 src/gallium/drivers/nv50/nv84_video.h
 create mode 100644 src/gallium/drivers/nv50/nv84_video_bsp.c
 create mode 100644 src/gallium/drivers/nv50/nv84_video_vp.c

diff --git a/src/gallium/drivers/nv50/Makefile.sources b/src/gallium/drivers/nv50/Makefile.sources
index 1092570..0fdac51 100644
--- a/src/gallium/drivers/nv50/Makefile.sources
+++ b/src/gallium/drivers/nv50/Makefile.sources
@@ -13,7 +13,10 @@ C_SOURCES := \
 	nv50_program.c \
 	nv50_shader_state.c \
 	nv50_push.c \
-	nv50_query.c
+	nv50_query.c \
+	nv84_video.c \
+	nv84_video_bsp.c \
+	nv84_video_vp.c
 
 CODEGEN_NV50_SOURCES := \
 	codegen/nv50_ir.cpp \
diff --git a/src/gallium/drivers/nv50/nv50_context.c b/src/gallium/drivers/nv50/nv50_context.c
index 5781c4b..79a0473 100644
--- a/src/gallium/drivers/nv50/nv50_context.c
+++ b/src/gallium/drivers/nv50/nv50_context.c
@@ -258,7 +258,18 @@ nv50_create(struct pipe_screen *pscreen, void *priv)
    draw_set_rasterize_stage(nv50->draw, nv50_draw_render_stage(nv50));
 #endif
 
-   nouveau_context_init_vdec(&nv50->base);
+   if (screen->base.device->chipset < 0x84) {
+      /* PMPEG */
+      nouveau_context_init_vdec(&nv50->base);
+   } else if (screen->base.device->chipset < 0x98 ||
+              screen->base.device->chipset == 0xa0) {
+      /* VP2 */
+      pipe->create_video_decoder = nv84_create_decoder;
+      pipe->create_video_buffer = nv84_video_buffer_create;
+   } else {
+      /* Unsupported, but need to init pointers. */
+      nouveau_context_init_vdec(&nv50->base);
+   }
 
    flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;
 
diff --git a/src/gallium/drivers/nv50/nv50_context.h b/src/gallium/drivers/nv50/nv50_context.h
index 0a83131..b204cc8 100644
--- a/src/gallium/drivers/nv50/nv50_context.h
+++ b/src/gallium/drivers/nv50/nv50_context.h
@@ -289,4 +289,28 @@ void nv50_vertex_arrays_validate(struct nv50_context *nv50);
 /* nv50_push.c */
 void nv50_push_vbo(struct nv50_context *, const struct pipe_draw_info *);
 
+/* nv84_video.c */
+struct pipe_video_decoder *
+nv84_create_decoder(struct pipe_context *context,
+                    enum pipe_video_profile profile,
+                    enum pipe_video_entrypoint entrypoint,
+                    enum pipe_video_chroma_format chroma_format,
+                    unsigned width, unsigned height,
+                    unsigned max_references,
+                    bool expect_chunked_decode);
+
+struct pipe_video_buffer *
+nv84_video_buffer_create(struct pipe_context *pipe,
+                         const struct pipe_video_buffer *template);
+
+int
+nv84_screen_get_video_param(struct pipe_screen *pscreen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param);
+
+boolean
+nv84_screen_video_supported(struct pipe_screen *screen,
+                            enum pipe_format format,
+                            enum pipe_video_profile profile);
+
 #endif
diff --git a/src/gallium/drivers/nv50/nv50_miptree.c b/src/gallium/drivers/nv50/nv50_miptree.c
index 036f1c7..28be768 100644
--- a/src/gallium/drivers/nv50/nv50_miptree.c
+++ b/src/gallium/drivers/nv50/nv50_miptree.c
@@ -239,6 +239,28 @@ nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align)
 }
 
 static void
+nv50_miptree_init_layout_video(struct nv50_miptree *mt)
+{
+   const struct pipe_resource *pt = &mt->base.base;
+   const unsigned blocksize = util_format_get_blocksize(pt->format);
+
+   assert(pt->last_level == 0);
+   assert(mt->ms_x == 0 && mt->ms_y == 0);
+   assert(!util_format_is_compressed(pt->format));
+
+   mt->layout_3d = pt->target == PIPE_TEXTURE_3D;
+
+   mt->level[0].tile_mode = 0x20;
+   mt->level[0].pitch = align(pt->width0 * blocksize, 64);
+   mt->total_size = align(pt->height0, 16) * mt->level[0].pitch * (mt->layout_3d ? pt->depth0 : 1);
+
+   if (pt->array_size > 1) {
+      mt->layer_stride = align(mt->total_size, NV50_TILE_SIZE(0x20));
+      mt->total_size = mt->layer_stride * pt->array_size;
+   }
+}
+
+static void
 nv50_miptree_init_layout_tiled(struct nv50_miptree *mt)
 {
    struct pipe_resource *pt = &mt->base.base;
@@ -311,6 +333,11 @@ nv50_miptree_create(struct pipe_screen *pscreen,
       return NULL;
    }
 
+   if (unlikely(pt->flags & NV50_RESOURCE_FLAG_VIDEO)) {
+      nv50_miptree_init_layout_video(mt);
+      /* BO allocation done by client */
+      return pt;
+   } else
    if (bo_config.nv50.memtype != 0) {
       nv50_miptree_init_layout_tiled(mt);
    } else
diff --git a/src/gallium/drivers/nv50/nv50_resource.h b/src/gallium/drivers/nv50/nv50_resource.h
index 6b92463..c520a72 100644
--- a/src/gallium/drivers/nv50/nv50_resource.h
+++ b/src/gallium/drivers/nv50/nv50_resource.h
@@ -16,6 +16,7 @@ nv50_init_resource_functions(struct pipe_context *pcontext);
 void
 nv50_screen_init_resource_functions(struct pipe_screen *pscreen);
 
+#define NV50_RESOURCE_FLAG_VIDEO (NOUVEAU_RESOURCE_FLAG_DRV_PRIV << 0)
 
 #define NV50_TILE_SHIFT_X(m) 6
 #define NV50_TILE_SHIFT_Y(m) ((((m) >> 4) & 0xf) + 2)
diff --git a/src/gallium/drivers/nv50/nv50_screen.c b/src/gallium/drivers/nv50/nv50_screen.c
index b6da303..86bad6b 100644
--- a/src/gallium/drivers/nv50/nv50_screen.c
+++ b/src/gallium/drivers/nv50/nv50_screen.c
@@ -646,7 +646,18 @@ nv50_screen_create(struct nouveau_device *dev)
 
    nv50_screen_init_resource_functions(pscreen);
 
-   nouveau_screen_init_vdec(&screen->base);
+   if (screen->base.device->chipset < 0x84) {
+      /* PMPEG */
+      nouveau_screen_init_vdec(&screen->base);
+   } else if (screen->base.device->chipset < 0x98 ||
+              screen->base.device->chipset == 0xa0) {
+      /* VP2 */
+      screen->base.base.get_video_param = nv84_screen_get_video_param;
+      screen->base.base.is_video_format_supported = nv84_screen_video_supported;
+   } else {
+      /* Unsupported, but need to init pointers. */
+      nouveau_screen_init_vdec(&screen->base);
+   }
 
    ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                         NULL, &screen->fence.bo);
diff --git a/src/gallium/drivers/nv50/nv50_winsys.h b/src/gallium/drivers/nv50/nv50_winsys.h
index 145ee70..e04247b 100644
--- a/src/gallium/drivers/nv50/nv50_winsys.h
+++ b/src/gallium/drivers/nv50/nv50_winsys.h
@@ -60,6 +60,10 @@ PUSH_REFN(struct nouveau_pushbuf *push, struct nouveau_bo *bo, uint32_t flags)
 #define SUBC_COMPUTE(m) 6, (m)
 #define NV50_COMPUTE(n) SUBC_COMPUTE(NV50_COMPUTE_##n)
 
+/* These are expected to be on their own pushbufs */
+#define SUBC_BSP(m) 2, (m)
+#define SUBC_VP(m) 2, (m)
+
 
 static INLINE uint32_t
 NV50_FIFO_PKHDR(int subc, int mthd, unsigned size)
diff --git a/src/gallium/drivers/nv50/nv84_video.c b/src/gallium/drivers/nv50/nv84_video.c
new file mode 100644
index 0000000..502379a
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+#include "util/u_format.h"
+#include "util/u_sampler.h"
+#include "vl/vl_zscan.h"
+
+#include "nv84_video.h"
+
+static int
+nv84_copy_firmware(const char *path, void *dest, ssize_t len)
+{
+   int fd = open(path, O_RDONLY | O_CLOEXEC);
+   ssize_t r;
+   if (fd < 0) {
+      fprintf(stderr, "opening firmware file %s failed: %m\n", path);
+      return 1;
+   }
+   r = read(fd, dest, len);
+   close(fd);
+
+   if (r != len) {
+      fprintf(stderr, "reading firmware file %s failed: %m\n", path);
+      return 1;
+   }
+
+   return 0;
+}
+
+static int
+filesize(const char *path)
+{
+   int ret;
+   struct stat statbuf;
+
+   ret = stat(path, &statbuf);
+   if (ret)
+      return ret;
+   return statbuf.st_size;
+}
+
+static struct nouveau_bo *
+nv84_load_firmwares(struct nouveau_device *dev, struct nv84_decoder *dec,
+                    const char *fw1, const char *fw2)
+{
+   int ret, size1, size2 = 0;
+   struct nouveau_bo *fw;
+
+   size1 = filesize(fw1);
+   if (fw2)
+      size2 = filesize(fw2);
+   if (size1 < 0 || size2 < 0)
+      return NULL;
+
+   dec->vp_fw2_offset = align(size1, 0x100);
+
+   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, dec->vp_fw2_offset + size2, NULL, &fw);
+   if (ret)
+      return NULL;
+   ret = nouveau_bo_map(fw, NOUVEAU_BO_WR, dec->client);
+   if (ret)
+      goto error;
+
+   ret = nv84_copy_firmware(fw1, fw->map, size1);
+   if (fw2 && !ret)
+      ret = nv84_copy_firmware(fw2, fw->map + dec->vp_fw2_offset, size2);
+   munmap(fw->map, fw->size);
+   fw->map = NULL;
+   if (!ret)
+      return fw;
+error:
+   nouveau_bo_ref(NULL, &fw);
+   return NULL;
+}
+
+static struct nouveau_bo *
+nv84_load_bsp_firmware(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec, "/lib/firmware/nouveau/nv84_bsp-h264", NULL);
+}
+
+static struct nouveau_bo *
+nv84_load_vp_firmware(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec,
+         "/lib/firmware/nouveau/nv84_vp-h264-1",
+         "/lib/firmware/nouveau/nv84_vp-h264-2");
+}
+
+static struct nouveau_bo *
+nv84_load_vp_firmware_mpeg(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec, "/lib/firmware/nouveau/nv84_vp-mpeg12", NULL);
+}
+
+static void
+nv84_decoder_decode_bitstream_h264(struct pipe_video_decoder *decoder,
+                                   struct pipe_video_buffer *video_target,
+                                   struct pipe_picture_desc *picture,
+                                   unsigned num_buffers,
+                                   const void *const *data,
+                                   const unsigned *num_bytes)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
+
+   struct pipe_h264_picture_desc *desc = (struct pipe_h264_picture_desc *)picture;
+
+   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
+
+   nv84_decoder_bsp(dec, desc, num_buffers, data, num_bytes, target);
+   nv84_decoder_vp_h264(dec, desc, target);
+}
+
+static void
+nv84_decoder_flush(struct pipe_video_decoder *decoder)
+{
+}
+
+static void
+nv84_decoder_begin_frame_h264(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+}
+
+static void
+nv84_decoder_end_frame_h264(struct pipe_video_decoder *decoder,
+                            struct pipe_video_buffer *target,
+                            struct pipe_picture_desc *picture)
+{
+}
+
+static void
+nv84_decoder_decode_bitstream_mpeg12(struct pipe_video_decoder *decoder,
+                                     struct pipe_video_buffer *video_target,
+                                     struct pipe_picture_desc *picture,
+                                     unsigned num_buffers,
+                                     const void *const *data,
+                                     const unsigned *num_bytes)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+
+   assert(video_target->buffer_format == PIPE_FORMAT_NV12);
+
+   vl_mpg12_bs_decode(dec->mpeg12_bs,
+                      video_target,
+                      (struct pipe_mpeg12_picture_desc *)picture,
+                      num_buffers,
+                      data,
+                      num_bytes);
+}
+
+static void
+nv84_decoder_begin_frame_mpeg12(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+   int i;
+
+   nouveau_bo_wait(dec->mpeg12_bo, NOUVEAU_BO_RDWR, dec->client);
+   dec->mpeg12_mb_info = dec->mpeg12_bo->map + 0x100;
+   dec->mpeg12_data = dec->mpeg12_bo->map + 0x100 +
+      align(0x20 * mb(dec->base.width) * mb(dec->base.height), 0x100);
+   if (desc->intra_matrix) {
+      dec->zscan = desc->alternate_scan ? vl_zscan_alternate : vl_zscan_normal;
+      for (i = 0; i < 64; i++) {
+         dec->mpeg12_intra_matrix[i] = desc->intra_matrix[dec->zscan[i]];
+         dec->mpeg12_non_intra_matrix[i] = desc->non_intra_matrix[dec->zscan[i]];
+      }
+      dec->mpeg12_intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
+   }
+}
+
+static void
+nv84_decoder_end_frame_mpeg12(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+   nv84_decoder_vp_mpeg12(
+         (struct nv84_decoder *)decoder,
+         (struct pipe_mpeg12_picture_desc *)picture,
+         (struct nv84_video_buffer *)target);
+}
+
+static void
+nv84_decoder_decode_macroblock(struct pipe_video_decoder *decoder,
+                               struct pipe_video_buffer *target,
+                               struct pipe_picture_desc *picture,
+                               const struct pipe_macroblock *macroblocks,
+                               unsigned num_macroblocks)
+{
+   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
+   for (int i = 0; i < num_macroblocks; i++, mb++) {
+      nv84_decoder_vp_mpeg12_mb(
+            (struct nv84_decoder *)decoder,
+            (struct pipe_mpeg12_picture_desc *)picture,
+            mb);
+   }
+}
+
+static void
+nv84_decoder_destroy(struct pipe_video_decoder *decoder)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+
+   nouveau_bo_ref(NULL, &dec->bsp_fw);
+   nouveau_bo_ref(NULL, &dec->bsp_data);
+   nouveau_bo_ref(NULL, &dec->vp_fw);
+   nouveau_bo_ref(NULL, &dec->vp_data);
+   nouveau_bo_ref(NULL, &dec->mbring);
+   nouveau_bo_ref(NULL, &dec->vpring);
+   nouveau_bo_ref(NULL, &dec->bitstream);
+   nouveau_bo_ref(NULL, &dec->vp_params);
+   nouveau_bo_ref(NULL, &dec->fence);
+
+   nouveau_object_del(&dec->bsp);
+   nouveau_object_del(&dec->vp);
+
+   nouveau_bufctx_del(&dec->bsp_bufctx);
+   nouveau_pushbuf_del(&dec->bsp_pushbuf);
+   nouveau_object_del(&dec->bsp_channel);
+
+   nouveau_bufctx_del(&dec->vp_bufctx);
+   nouveau_pushbuf_del(&dec->vp_pushbuf);
+   nouveau_object_del(&dec->vp_channel);
+
+   nouveau_client_del(&dec->client);
+
+   if (dec->mpeg12_bs)
+      FREE(dec->mpeg12_bs);
+   FREE(dec);
+}
+
+struct pipe_video_decoder *
+nv84_create_decoder(struct pipe_context *context,
+                    enum pipe_video_profile profile,
+                    enum pipe_video_entrypoint entrypoint,
+                    enum pipe_video_chroma_format chroma_format,
+                    unsigned width, unsigned height,
+                    unsigned max_references,
+                    bool chunked_decode)
+{
+   struct nv50_context *nv50 = (struct nv50_context *)context;
+   struct nouveau_screen *screen = &nv50->screen->base;
+   struct nv84_decoder *dec;
+   struct nouveau_pushbuf *bsp_push, *vp_push;
+   struct nv50_surface surf;
+   struct nv50_miptree mip;
+   union pipe_color_union color;
+   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
+   int ret, i;
+   int is_h264 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC;
+   int is_mpeg12 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
+
+   if (getenv("XVMC_VL"))
+      return vl_create_decoder(context, profile, entrypoint,
+                               chroma_format, width, height,
+                               max_references, chunked_decode);
+
+   if ((is_h264 && entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
+       (is_mpeg12 && entrypoint > PIPE_VIDEO_ENTRYPOINT_IDCT)) {
+      debug_printf("%x\n", entrypoint);
+      return NULL;
+   }
+
+   if (!is_h264 && !is_mpeg12) {
+      debug_printf("invalid profile: %x\n", profile);
+      return NULL;
+   }
+
+   dec = CALLOC_STRUCT(nv84_decoder);
+   if (!dec)
+      return NULL;
+
+   dec->base.context = context;
+   dec->base.profile = profile;
+   dec->base.entrypoint = entrypoint;
+   dec->base.chroma_format = chroma_format;
+   dec->base.width = width;
+   dec->base.height = height;
+   dec->base.max_references = max_references;
+   dec->base.destroy = nv84_decoder_destroy;
+   dec->base.flush = nv84_decoder_flush;
+   if (is_h264) {
+      dec->base.decode_bitstream = nv84_decoder_decode_bitstream_h264;
+      dec->base.begin_frame = nv84_decoder_begin_frame_h264;
+      dec->base.end_frame = nv84_decoder_end_frame_h264;
+
+      dec->frame_mbs = mb(dec->base.width) * mb_half(dec->base.height) * 2;
+      dec->frame_size = dec->frame_mbs << 8;
+      dec->vpring_deblock = align(0x30 * dec->frame_mbs, 0x100);
+      dec->vpring_residual = 0x2000 + MAX2(0x32000, 0x600 * dec->frame_mbs);
+      dec->vpring_ctrl = MAX2(0x10000, align(0x1080 + 0x144 * dec->frame_mbs, 0x100));
+   } else if (is_mpeg12) {
+      dec->base.decode_macroblock = nv84_decoder_decode_macroblock;
+      dec->base.begin_frame = nv84_decoder_begin_frame_mpeg12;
+      dec->base.end_frame = nv84_decoder_end_frame_mpeg12;
+
+      if (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         dec->mpeg12_bs = CALLOC_STRUCT(vl_mpg12_bs);
+         if (!dec->mpeg12_bs)
+            goto fail;
+         vl_mpg12_bs_init(dec->mpeg12_bs, &dec->base);
+         dec->base.decode_bitstream = nv84_decoder_decode_bitstream_mpeg12;
+      }
+   } else {
+      goto fail;
+   }
+
+   ret = nouveau_client_new(screen->device, &dec->client);
+   if (ret)
+      goto fail;
+
+   if (is_h264) {
+      ret = nouveau_object_new(&screen->device->object, 0,
+                               NOUVEAU_FIFO_CHANNEL_CLASS,
+                               &nv04_data, sizeof(nv04_data), &dec->bsp_channel);
+      if (ret)
+         goto fail;
+
+      ret = nouveau_pushbuf_new(dec->client, dec->bsp_channel, 4,
+                                32 * 1024, true, &dec->bsp_pushbuf);
+      if (ret)
+         goto fail;
+
+      ret = nouveau_bufctx_new(dec->client, 1, &dec->bsp_bufctx);
+      if (ret)
+         goto fail;
+   }
+
+   ret = nouveau_object_new(&screen->device->object, 0,
+                            NOUVEAU_FIFO_CHANNEL_CLASS,
+                            &nv04_data, sizeof(nv04_data), &dec->vp_channel);
+   if (ret)
+      goto fail;
+   ret = nouveau_pushbuf_new(dec->client, dec->vp_channel, 4,
+                             32 * 1024, true, &dec->vp_pushbuf);
+   if (ret)
+      goto fail;
+
+   ret = nouveau_bufctx_new(dec->client, 1, &dec->vp_bufctx);
+   if (ret)
+      goto fail;
+
+   bsp_push = dec->bsp_pushbuf;
+   vp_push = dec->vp_pushbuf;
+
+   if (is_h264) {
+      dec->bsp_fw = nv84_load_bsp_firmware(screen->device, dec);
+      dec->vp_fw = nv84_load_vp_firmware(screen->device, dec);
+      if (!dec->bsp_fw || !dec->vp_fw)
+         goto fail;
+   }
+   if (is_mpeg12) {
+      dec->vp_fw = nv84_load_vp_firmware_mpeg(screen->device, dec);
+      if (!dec->vp_fw)
+         goto fail;
+   }
+
+   if (is_h264) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0, 0x40000, NULL, &dec->bsp_data);
+      if (ret)
+         goto fail;
+   }
+   ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                        0, 0x40000, NULL, &dec->vp_data);
+   if (ret)
+      goto fail;
+   if (is_h264) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0,
+                           2 * (dec->vpring_deblock +
+                                dec->vpring_residual +
+                                dec->vpring_ctrl +
+                                0x1000),
+                           NULL, &dec->vpring);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0,
+                           (max_references + 1) * dec->frame_mbs * 0x40 +
+                           dec->frame_size + 0x2000,
+                           NULL, &dec->mbring);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0, 2 * (0x700 + MAX2(0x40000, 0x800 + 0x180 * dec->frame_mbs)),
+                           NULL, &dec->bitstream);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->bitstream, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0, 0x2000, NULL, &dec->vp_params);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->vp_params, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+   }
+   if (is_mpeg12) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0,
+                           align(0x20 * mb(width) * mb(height), 0x100) +
+                           (6 * 64 * 8) * mb(width) * mb(height) + 0x100,
+                           NULL, &dec->mpeg12_bo);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->mpeg12_bo, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+   }
+
+   ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM,
+                        0, 0x1000, NULL, &dec->fence);
+   if (ret)
+      goto fail;
+   ret = nouveau_bo_map(dec->fence, NOUVEAU_BO_WR, dec->client);
+   if (ret)
+      goto fail;
+   *(uint32_t *)dec->fence->map = 0;
+
+   if (is_h264) {
+      nouveau_pushbuf_bufctx(bsp_push, dec->bsp_bufctx);
+      nouveau_bufctx_refn(dec->bsp_bufctx, 0,
+                          dec->bsp_fw, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+      nouveau_bufctx_refn(dec->bsp_bufctx, 0,
+                          dec->bsp_data, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+   }
+
+   nouveau_pushbuf_bufctx(vp_push, dec->vp_bufctx);
+   nouveau_bufctx_refn(dec->vp_bufctx, 0, dec->vp_fw,
+                       NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+   nouveau_bufctx_refn(dec->vp_bufctx, 0, dec->vp_data,
+                       NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+
+   if (is_h264 && !ret)
+      ret = nouveau_object_new(dec->bsp_channel, 0xbeef74b0, 0x74b0,
+                               NULL, 0, &dec->bsp);
+
+   if (!ret)
+      ret = nouveau_object_new(dec->vp_channel, 0xbeef7476, 0x7476,
+                               NULL, 0, &dec->vp);
+
+   if (ret)
+      goto fail;
+
+
+   if (is_h264) {
+      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
+       * of doing this... perhaps makes sense to just copy the relevant logic
+       * here. */
+      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
+      surf.offset = dec->frame_size;
+      surf.width = 64;
+      surf.height = (max_references + 1) * dec->frame_mbs / 4;
+      surf.depth = 1;
+      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
+      surf.base.u.tex.level = 0;
+      surf.base.texture = &mip.base.base;
+      mip.level[0].tile_mode = 0;
+      mip.level[0].pitch = surf.width * 4;
+      mip.base.domain = NOUVEAU_BO_VRAM;
+      mip.base.bo = dec->mbring;
+      context->clear_render_target(context, &surf.base, &color, 0, 0, 64, 4760);
+      surf.offset = dec->vpring->size / 2 - 0x1000;
+      surf.width = 1024;
+      surf.height = 1;
+      mip.level[0].pitch = surf.width * 4;
+      mip.base.bo = dec->vpring;
+      context->clear_render_target(context, &surf.base, &color, 0, 0, 1024, 1);
+      surf.offset = dec->vpring->size - 0x1000;
+      context->clear_render_target(context, &surf.base, &color, 0, 0, 1024, 1);
+
+      PUSH_SPACE(screen->pushbuf, 5);
+      PUSH_REFN(screen->pushbuf, dec->fence, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+      /* The clear_render_target is done via 3D engine, so use it to write to a
+       * semaphore to indicate that it's done.
+       */
+      BEGIN_NV04(screen->pushbuf, NV50_3D(QUERY_ADDRESS_HIGH), 4);
+      PUSH_DATAh(screen->pushbuf, dec->fence->offset);
+      PUSH_DATA (screen->pushbuf, dec->fence->offset);
+      PUSH_DATA (screen->pushbuf, 1);
+      PUSH_DATA (screen->pushbuf, 0xf010);
+      PUSH_KICK (screen->pushbuf);
+
+      PUSH_SPACE(bsp_push, 2 + 12 + 2 + 4 + 3);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(NV01_SUBCHAN_OBJECT), 1);
+      PUSH_DATA (bsp_push, dec->bsp->handle);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x180), 11);
+      for (i = 0; i < 11; i++)
+         PUSH_DATA(bsp_push, nv04_data.vram);
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x1b8), 1);
+      PUSH_DATA (bsp_push, nv04_data.vram);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x600), 3);
+      PUSH_DATAh(bsp_push, dec->bsp_fw->offset);
+      PUSH_DATA (bsp_push, dec->bsp_fw->offset);
+      PUSH_DATA (bsp_push, dec->bsp_fw->size);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x628), 2);
+      PUSH_DATA (bsp_push, dec->bsp_data->offset >> 8);
+      PUSH_DATA (bsp_push, dec->bsp_data->size);
+      PUSH_KICK (bsp_push);
+   }
+
+   PUSH_SPACE(vp_push, 2 + 12 + 2 + 4 + 3);
+
+   BEGIN_NV04(vp_push, SUBC_VP(NV01_SUBCHAN_OBJECT), 1);
+   PUSH_DATA (vp_push, dec->vp->handle);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x180), 11);
+   for (i = 0; i < 11; i++)
+      PUSH_DATA(vp_push, nv04_data.vram);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x1b8), 1);
+   PUSH_DATA (vp_push, nv04_data.vram);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x600), 3);
+   PUSH_DATAh(vp_push, dec->vp_fw->offset);
+   PUSH_DATA (vp_push, dec->vp_fw->offset);
+   PUSH_DATA (vp_push, dec->vp_fw->size);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x628), 2);
+   PUSH_DATA (vp_push, dec->vp_data->offset >> 8);
+   PUSH_DATA (vp_push, dec->vp_data->size);
+   PUSH_KICK (vp_push);
+
+   return &dec->base;
+fail:
+   nv84_decoder_destroy(&dec->base);
+   return NULL;
+}
+
+/* pipe_video_buffer::get_sampler_view_planes hook: hand back the
+ * per-plane sampler views stored on the nv84 buffer. */
+static struct pipe_sampler_view **
+nv84_video_buffer_sampler_view_planes(struct pipe_video_buffer *buffer)
+{
+   return ((struct nv84_video_buffer *)buffer)->sampler_view_planes;
+}
+
+/* pipe_video_buffer::get_sampler_view_components hook: hand back the
+ * per-component sampler views stored on the nv84 buffer. */
+static struct pipe_sampler_view **
+nv84_video_buffer_sampler_view_components(struct pipe_video_buffer *buffer)
+{
+   return ((struct nv84_video_buffer *)buffer)->sampler_view_components;
+}
+
+/* pipe_video_buffer::get_surfaces hook: hand back the render surfaces
+ * (two per plane, one per field) stored on the nv84 buffer. */
+static struct pipe_surface **
+nv84_video_buffer_surfaces(struct pipe_video_buffer *buffer)
+{
+   return ((struct nv84_video_buffer *)buffer)->surfaces;
+}
+
+/* pipe_video_buffer::destroy hook: drop every resource/view/surface
+ * reference and the backing BOs, then free the wrapper itself. */
+static void
+nv84_video_buffer_destroy(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   unsigned n;
+
+   assert(buf);
+
+   /* Release the per-component resources and sampler views. */
+   for (n = 0; n < VL_NUM_COMPONENTS; ++n) {
+      pipe_resource_reference(&buf->resources[n], NULL);
+      pipe_sampler_view_reference(&buf->sampler_view_planes[n], NULL);
+      pipe_sampler_view_reference(&buf->sampler_view_components[n], NULL);
+   }
+   /* Both field surfaces of every plane. */
+   for (n = 0; n < VL_NUM_COMPONENTS * 2; ++n)
+      pipe_surface_reference(&buf->surfaces[n], NULL);
+
+   nouveau_bo_ref(NULL, &buf->interlaced);
+   nouveau_bo_ref(NULL, &buf->full);
+
+   FREE(buffer);
+}
+
+/* Create an interlaced NV12 video buffer whose Y and UV planes live
+ * back-to-back in a single VRAM BO, since VP expects the planes to be
+ * adjacent.  Honors the XVMC_VL environment variable by falling back to
+ * the generic vl implementation.  Returns NULL on unsupported templates
+ * or allocation failure. */
+struct pipe_video_buffer *
+nv84_video_buffer_create(struct pipe_context *pipe,
+                         const struct pipe_video_buffer *template)
+{
+   struct nv84_video_buffer *buffer;
+   struct pipe_resource templ;
+   unsigned i, j, component;
+   struct pipe_sampler_view sv_templ;
+   struct pipe_surface surf_templ;
+   struct nv50_miptree *mt0, *mt1;
+   struct nouveau_bo *empty = NULL;
+   struct nouveau_screen *screen = &((struct nv50_context *)pipe)->screen->base;
+   union nouveau_bo_config cfg;
+   unsigned bo_size;
+
+   if (getenv("XVMC_VL"))
+      return vl_video_buffer_create(pipe, template);
+
+   /* This path only handles interlaced NV12 4:2:0 buffers. */
+   if (!template->interlaced) {
+      debug_printf("Require interlaced video buffers\n");
+      return NULL;
+   }
+   if (template->buffer_format != PIPE_FORMAT_NV12) {
+      debug_printf("Must use NV12 format\n");
+      return NULL;
+   }
+   if (template->chroma_format != PIPE_VIDEO_CHROMA_FORMAT_420) {
+      debug_printf("Must use 4:2:0 format\n");
+      return NULL;
+   }
+
+   /*
+    * Note that there are always going to be exactly two planes, one for Y,
+    * and one for UV. These are also the resources. VP expects these to be
+    * adjacent, so they need to belong to the same BO.
+    */
+
+   buffer = CALLOC_STRUCT(nv84_video_buffer);
+   if (!buffer) return NULL;
+
+   /* Not used as a reference frame yet; the decoder assigns a slot. */
+   buffer->mvidx = -1;
+
+   buffer->base.buffer_format = template->buffer_format;
+   buffer->base.context = pipe;
+   buffer->base.destroy = nv84_video_buffer_destroy;
+   buffer->base.chroma_format = template->chroma_format;
+   buffer->base.width = template->width;
+   buffer->base.height = template->height;
+   buffer->base.get_sampler_view_planes = nv84_video_buffer_sampler_view_planes;
+   buffer->base.get_sampler_view_components = nv84_video_buffer_sampler_view_components;
+   buffer->base.get_surfaces = nv84_video_buffer_surfaces;
+   buffer->base.interlaced = true;
+
+   /* Plane 0 (Y): R8, one array layer per field, hence height/2. */
+   memset(&templ, 0, sizeof(templ));
+   templ.target = PIPE_TEXTURE_2D_ARRAY;
+   templ.depth0 = 1;
+   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+   templ.format = PIPE_FORMAT_R8_UNORM;
+   templ.width0 = align(template->width, 2);
+   templ.height0 = align(template->height, 4) / 2;
+   templ.flags = NV50_RESOURCE_FLAG_VIDEO;
+   templ.array_size = 2;
+
+   /* NOTE(review): only tile_mode/memtype of the config union are set;
+    * the rest is uninitialized stack.  Confirm nouveau_bo_new reads no
+    * other members for a VRAM allocation. */
+   cfg.nv50.tile_mode = 0x20;
+   cfg.nv50.memtype = 0x70;
+
+   buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
+   if (!buffer->resources[0])
+      goto error;
+
+   /* Plane 1 (UV): interleaved R8G8 at half resolution. */
+   templ.format = PIPE_FORMAT_R8G8_UNORM;
+   templ.width0 /= 2;
+   templ.height0 /= 2;
+   buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
+   if (!buffer->resources[1])
+      goto error;
+
+   mt0 = nv50_miptree(buffer->resources[0]);
+   mt1 = nv50_miptree(buffer->resources[1]);
+
+   /* One BO big enough for both planes, so they end up adjacent. */
+   bo_size = mt0->total_size + mt1->total_size;
+   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
+                      bo_size, &cfg, &buffer->interlaced))
+      goto error;
+   /* XXX Change reference frame management so that this is only allocated in
+    * the decoder when necessary. */
+   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
+                      bo_size, &cfg, &buffer->full))
+      goto error;
+
+   /* Repoint both miptrees at the shared BO. */
+   mt0->base.bo = buffer->interlaced;
+   mt0->base.domain = NOUVEAU_BO_VRAM;
+   mt0->base.offset = 0;
+   mt0->base.address = buffer->interlaced->offset + mt0->base.offset;
+   nouveau_bo_ref(buffer->interlaced, &empty);
+
+   mt1->base.bo = buffer->interlaced;
+   mt1->base.domain = NOUVEAU_BO_VRAM;
+   mt1->base.offset = mt0->layer_stride * 2;
+   mt1->base.address = buffer->interlaced->offset + mt1->base.offset;
+   /* NOTE(review): 'empty' already points at 'interlaced' here, so this
+    * second nouveau_bo_ref() looks like a net no-op.  If the intent is
+    * one extra reference per miptree, verify the refcounting. */
+   nouveau_bo_ref(buffer->interlaced, &empty);
+
+   memset(&sv_templ, 0, sizeof(sv_templ));
+   for (component = 0, i = 0; i < 2; ++i ) {
+      struct pipe_resource *res = buffer->resources[i];
+      unsigned nr_components = util_format_get_nr_components(res->format);
+
+      u_sampler_view_default_template(&sv_templ, res, res->format);
+      buffer->sampler_view_planes[i] =
+         pipe->create_sampler_view(pipe, res, &sv_templ);
+      if (!buffer->sampler_view_planes[i])
+         goto error;
+
+      /* One single-component view per channel, broadcast via swizzle. */
+      for (j = 0; j < nr_components; ++j, ++component) {
+         sv_templ.swizzle_r = sv_templ.swizzle_g = sv_templ.swizzle_b =
+            PIPE_SWIZZLE_RED + j;
+         sv_templ.swizzle_a = PIPE_SWIZZLE_ONE;
+
+         buffer->sampler_view_components[component] =
+            pipe->create_sampler_view(pipe, res, &sv_templ);
+         if (!buffer->sampler_view_components[component])
+            goto error;
+      }
+   }
+
+   /* Two surfaces per plane: one per field (array layers 0 and 1). */
+   memset(&surf_templ, 0, sizeof(surf_templ));
+   for (j = 0; j < 2; ++j) {
+      surf_templ.format = buffer->resources[j]->format;
+      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 0;
+      buffer->surfaces[j * 2] =
+         pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
+      if (!buffer->surfaces[j * 2])
+         goto error;
+
+      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 1;
+      buffer->surfaces[j * 2 + 1] =
+         pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
+      if (!buffer->surfaces[j * 2 + 1])
+         goto error;
+   }
+
+   return &buffer->base;
+
+error:
+   nv84_video_buffer_destroy(&buffer->base);
+   return NULL;
+}
+
+/* pipe_screen::get_video_param for the VP2-based nv84 decoder. */
+int
+nv84_screen_get_video_param(struct pipe_screen *pscreen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param)
+{
+   switch (param) {
+   case PIPE_VIDEO_CAP_SUPPORTED:
+      /* Only H.264 and MPEG1/2 are handled. */
+      switch (u_reduce_video_profile(profile)) {
+      case PIPE_VIDEO_CODEC_MPEG4_AVC:
+      case PIPE_VIDEO_CODEC_MPEG12:
+         return 1;
+      default:
+         return 0;
+      }
+   case PIPE_VIDEO_CAP_NPOT_TEXTURES:
+      return 1;
+   case PIPE_VIDEO_CAP_MAX_WIDTH:
+   case PIPE_VIDEO_CAP_MAX_HEIGHT:
+      return 2048;
+   case PIPE_VIDEO_CAP_PREFERED_FORMAT:
+      return PIPE_FORMAT_NV12;
+   case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
+   case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
+      return 1;
+   case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
+      return 0;
+   default:
+      debug_printf("unknown video param: %d\n", param);
+      return 0;
+   }
+}
+
+/* pipe_screen format query: the decoder only produces NV12 buffers. */
+boolean
+nv84_screen_video_supported(struct pipe_screen *screen,
+                            enum pipe_format format,
+                            enum pipe_video_profile profile)
+{
+   if (format != PIPE_FORMAT_NV12)
+      return FALSE;
+   return TRUE;
+}
diff --git a/src/gallium/drivers/nv50/nv84_video.h b/src/gallium/drivers/nv50/nv84_video.h
new file mode 100644
index 0000000..4ff8cf3
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NV84_VIDEO_H_
+#define NV84_VIDEO_H_
+
+#include "vl/vl_decoder.h"
+#include "vl/vl_video_buffer.h"
+#include "vl/vl_types.h"
+
+#include "vl/vl_mpeg12_bitstream.h"
+
+#include "util/u_video.h"
+
+#include "nv50_context.h"
+
+/* Lets one pointer carry whichever codec-specific picture description
+ * the state tracker handed us, without casts at every call site. */
+union pipe_desc {
+   struct pipe_picture_desc *base;
+   struct pipe_mpeg12_picture_desc *mpeg12;
+   struct pipe_mpeg4_picture_desc *mpeg4;
+   struct pipe_vc1_picture_desc *vc1;
+   struct pipe_h264_picture_desc *h264;
+};
+
+/* An NV12 video buffer: two planes (Y, interleaved UV) that share one
+ * VRAM BO, plus the views/surfaces the vl code needs. */
+struct nv84_video_buffer {
+   struct pipe_video_buffer base;
+   struct pipe_resource *resources[VL_NUM_COMPONENTS];
+   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
+   struct pipe_sampler_view *sampler_view_components[VL_NUM_COMPONENTS];
+   /* Two surfaces per plane: one per interlaced field (array layer 0/1). */
+   struct pipe_surface *surfaces[VL_NUM_COMPONENTS * 2];
+
+   /* Shared plane BO; 'full' is a same-sized BO kept per frame (see the
+    * XXX in nv84_video_buffer_create about moving it into the decoder). */
+   struct nouveau_bo *interlaced, *full;
+   /* Motion-vector slot index, or -1 until the frame becomes a reference. */
+   int mvidx;
+   /* frame_num is kept relative to the last IDR; frame_num_max tracks the
+    * highest frame_num seen so older references can go negative. */
+   unsigned frame_num, frame_num_max;
+};
+
+/* Decoder state for VP2-based H.264/MPEG2 decoding. */
+struct nv84_decoder {
+   struct pipe_video_decoder base;
+   struct nouveau_client *client;
+   /* Separate channels/pushbufs for the BSP and VP engines; the BSP
+    * object is only created for H.264. */
+   struct nouveau_object *bsp_channel, *vp_channel, *bsp, *vp;
+   struct nouveau_pushbuf *bsp_pushbuf, *vp_pushbuf;
+   struct nouveau_bufctx *bsp_bufctx, *vp_bufctx;
+
+   /* Firmware images and their data/scratch areas. */
+   struct nouveau_bo *bsp_fw, *bsp_data;
+   struct nouveau_bo *vp_fw, *vp_data;
+   /* Intermediate rings written by the BSP and read by VP (H.264). */
+   struct nouveau_bo *mbring, *vpring;
+
+   /*
+    * The first word of this BO orders BSP vs. VP work; states:
+    *  0: init
+    *  1: vpring/mbring cleared, bsp is ready
+    *  2: bsp is done, vp is ready
+    * and then VP sets it back to 1 when it finishes.
+    */
+   struct nouveau_bo *fence;
+
+   struct nouveau_bo *bitstream;   /* CPU-mapped: params + slice data */
+   struct nouveau_bo *vp_params;   /* CPU-mapped VP parameter area */
+
+   /* Presumably the offset of a second firmware section inside vp_fw —
+    * confirm against the decoder-creation code. */
+   size_t vp_fw2_offset;
+
+   unsigned frame_mbs, frame_size; /* macroblocks / bytes per frame */
+   /* VPRING layout:
+        RESIDUAL
+        CTRL
+        DEBLOCK
+        0x1000
+   */
+   unsigned vpring_deblock, vpring_residual, vpring_ctrl;
+
+
+   /* MPEG2 path: CPU-side bitstream parser plus a BO that stages
+    * macroblock info and coefficient data for the VP engine. */
+   struct vl_mpg12_bs *mpeg12_bs;
+
+   struct nouveau_bo *mpeg12_bo;
+   void *mpeg12_mb_info;    /* presumably a cursor into mpeg12_bo — verify */
+   uint16_t *mpeg12_data;   /* presumably a coefficient cursor — verify */
+   const int *zscan;        /* active coefficient scan-order table */
+   uint8_t mpeg12_intra_matrix[64];
+   uint8_t mpeg12_non_intra_matrix[64];
+};
+
+/* Convert a pixel extent into 16-pixel macroblock units, rounding up. */
+static INLINE uint32_t mb(uint32_t coord)
+{
+   return (coord + 15) >> 4;
+}
+
+/* Convert a pixel extent into 32-pixel units, rounding up; used for
+ * field (half-height) macroblock counts. */
+static INLINE uint32_t mb_half(uint32_t coord)
+{
+   return (coord + 31) >> 5;
+}
+
+/* Implemented in nv84_video_bsp.c: feed one H.264 frame's slice data
+ * through the BSP engine. */
+int
+nv84_decoder_bsp(struct nv84_decoder *dec,
+                 struct pipe_h264_picture_desc *desc,
+                 unsigned num_buffers,
+                 const void *const *data,
+                 const unsigned *num_bytes,
+                 struct nv84_video_buffer *dest);
+
+/* Implemented in nv84_video_vp.c: H.264 VP step for a frame already run
+ * through the BSP. */
+void
+nv84_decoder_vp_h264(struct nv84_decoder *dec,
+                     struct pipe_h264_picture_desc *desc,
+                     struct nv84_video_buffer *dest);
+
+/* Implemented in nv84_video_vp.c: stage one MPEG2 macroblock. */
+void
+nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
+                          struct pipe_mpeg12_picture_desc *desc,
+                          const struct pipe_mpeg12_macroblock *mb);
+
+/* Implemented in nv84_video_vp.c: submit the staged MPEG2 frame. */
+void
+nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
+                       struct pipe_mpeg12_picture_desc *desc,
+                       struct nv84_video_buffer *dest);
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv84_video_bsp.c b/src/gallium/drivers/nv50/nv84_video_bsp.c
new file mode 100644
index 0000000..7885210
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video_bsp.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv84_video.h"
+
+/* Parameter block consumed by the BSP firmware.  The trailing hex
+ * comments are byte offsets within each sub-block; the layout is
+ * firmware ABI, and its total size (0x530) is checked by a
+ * STATIC_ASSERT in nv84_decoder_bsp().  u*-named fields are
+ * as-yet-unidentified firmware inputs. */
+struct iparm {
+   struct iseqparm {
+      uint32_t chroma_format_idc; // 00
+      uint32_t pad[(0x128 - 0x4) / 4];
+      uint32_t log2_max_frame_num_minus4; // 128
+      uint32_t pic_order_cnt_type; // 12c
+      uint32_t log2_max_pic_order_cnt_lsb_minus4; // 130
+      uint32_t delta_pic_order_always_zero_flag; // 134
+      uint32_t num_ref_frames; // 138
+      uint32_t pic_width_in_mbs_minus1; // 13c
+      uint32_t pic_height_in_map_units_minus1; // 140
+      uint32_t frame_mbs_only_flag; // 144
+      uint32_t mb_adaptive_frame_field_flag; // 148
+      uint32_t direct_8x8_inference_flag; // 14c
+   } iseqparm; // 000
+   struct ipicparm {
+      uint32_t entropy_coding_mode_flag; // 00
+      uint32_t pic_order_present_flag; // 04
+      uint32_t num_slice_groups_minus1; // 08
+      uint32_t slice_group_map_type; // 0c
+      uint32_t pad1[0x60 / 4];
+      uint32_t u70; // 70
+      uint32_t u74; // 74
+      uint32_t u78; // 78
+      uint32_t num_ref_idx_l0_active_minus1; // 7c
+      uint32_t num_ref_idx_l1_active_minus1; // 80
+      uint32_t weighted_pred_flag; // 84
+      uint32_t weighted_bipred_idc; // 88
+      uint32_t pic_init_qp_minus26; // 8c
+      uint32_t chroma_qp_index_offset; // 90
+      uint32_t deblocking_filter_control_present_flag; // 94
+      uint32_t constrained_intra_pred_flag; // 98
+      uint32_t redundant_pic_cnt_present_flag; // 9c
+      uint32_t transform_8x8_mode_flag; // a0
+      uint32_t pad2[(0x1c8 - 0xa0 - 4) / 4];
+      uint32_t second_chroma_qp_index_offset; // 1c8
+      uint32_t u1cc; // 1cc
+      uint32_t curr_pic_order_cnt; // 1d0
+      uint32_t field_order_cnt[2]; // 1d4
+      uint32_t curr_mvidx; // 1dc
+      /* One entry per H.264 reference frame (up to 16). */
+      struct iref {
+         uint32_t u00; // 00
+         uint32_t field_is_ref; // 04 // bit0: top, bit1: bottom
+         uint8_t is_long_term; // 08
+         uint8_t non_existing; // 09
+         uint32_t frame_idx; // 0c
+         uint32_t field_order_cnt[2]; // 10
+         uint32_t mvidx; // 18
+         uint8_t field_pic_flag; // 1c
+         // 20
+      } refs[0x10]; // 1e0
+   } ipicparm; // 150
+};
+
+/* Feed one H.264 frame's slice data through the BSP (bitstream
+ * processor) engine.
+ *
+ * Builds the firmware parameter block (struct iparm) from the generic
+ * picture description, copies the slice data into the CPU-mapped
+ * bitstream BO, then submits the commands that kick the BSP.  The BSP
+ * writes into mbring/vpring for the VP step to consume.  Always
+ * returns 0; engine errors are not detected here.
+ */
+int
+nv84_decoder_bsp(struct nv84_decoder *dec,
+                 struct pipe_h264_picture_desc *desc,
+                 unsigned num_buffers,
+                 const void *const *data,
+                 const unsigned *num_bytes,
+                 struct nv84_video_buffer *dest)
+{
+   struct iparm params;
+   uint32_t more_params[0x44 / 4] = {0};
+   unsigned total_bytes = 0;
+   int i;
+   /* End-of-stream marker appended after the slice data. */
+   static const uint32_t end[] = {0x0b010000, 0, 0x0b010000, 0};
+   /* Which mvidx slots are occupied by reference frames. */
+   char indexes[17] = {0};
+   struct nouveau_pushbuf *push = dec->bsp_pushbuf;
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dec->vpring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mbring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->bitstream, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+      { dec->fence, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+
+   /* Wait until earlier work has released the shared buffers before they
+    * are overwritten below. */
+   nouveau_bo_wait(dec->fence, NOUVEAU_BO_RDWR, dec->client);
+
+   /* struct iparm's layout is firmware ABI; keep its size honest. */
+   STATIC_ASSERT(sizeof(struct iparm) == 0x530);
+
+   memset(&params, 0, sizeof(params));
+
+   dest->frame_num = dest->frame_num_max = desc->frame_num;
+
+   /* Fill in one iref entry per reference frame. */
+   for (i = 0; i < 16; i++) {
+      struct iref *ref = &params.ipicparm.refs[i];
+      struct nv84_video_buffer *frame = (struct nv84_video_buffer *)desc->ref[i];
+      if (!frame) break;
+      /* The frame index is relative to the last IDR frame. So once the frame
+       * num goes back to 0, previous reference frames need to have a negative
+       * index.
+       */
+      if (desc->frame_num >= frame->frame_num_max) {
+         frame->frame_num_max = desc->frame_num;
+      } else {
+         frame->frame_num -= frame->frame_num_max + 1;
+         frame->frame_num_max = desc->frame_num;
+      }
+      ref->non_existing = 0;
+      ref->field_is_ref = (desc->top_is_reference[i] ? 1 : 0) |
+         (desc->bottom_is_reference[i] ? 2 : 0);
+      ref->is_long_term = desc->is_long_term[i];
+      ref->field_order_cnt[0] = desc->field_order_cnt_list[i][0];
+      ref->field_order_cnt[1] = desc->field_order_cnt_list[i][1];
+      ref->frame_idx = frame->frame_num;
+      ref->u00 = ref->mvidx = frame->mvidx;
+      ref->field_pic_flag = desc->field_pic_flag;
+      indexes[frame->mvidx] = 1;
+   }
+
+   /* Needs to be adjusted if we ever support non-4:2:0 videos */
+   params.iseqparm.chroma_format_idc = 1;
+
+   params.iseqparm.pic_width_in_mbs_minus1 = mb(dec->base.width) - 1;
+   if (desc->field_pic_flag)
+      params.iseqparm.pic_height_in_map_units_minus1 = mb_half(dec->base.height) - 1;
+   else
+      params.iseqparm.pic_height_in_map_units_minus1 = mb(dec->base.height) - 1;
+
+   /* TODO: interlaced still doesn't work, maybe due to ref frame management. */
+   if (desc->bottom_field_flag)
+      params.ipicparm.curr_pic_order_cnt = desc->field_order_cnt[1];
+   else
+      params.ipicparm.curr_pic_order_cnt = desc->field_order_cnt[0];
+   params.ipicparm.field_order_cnt[0] = desc->field_order_cnt[0];
+   params.ipicparm.field_order_cnt[1] = desc->field_order_cnt[1];
+   if (desc->is_reference) {
+      /* Assign the first free motion-vector slot to this frame. */
+      if (dest->mvidx < 0) {
+         for (i = 0; i < desc->num_ref_frames + 1; i++) {
+            if (!indexes[i]) {
+               dest->mvidx = i;
+               break;
+            }
+         }
+         assert(i != desc->num_ref_frames + 1);
+      }
+
+      params.ipicparm.u1cc = params.ipicparm.curr_mvidx = dest->mvidx;
+   }
+
+   /* Straight copy of the remaining SPS/PPS-derived fields. */
+   params.iseqparm.num_ref_frames = desc->num_ref_frames;
+   params.iseqparm.mb_adaptive_frame_field_flag = desc->mb_adaptive_frame_field_flag;
+   params.ipicparm.constrained_intra_pred_flag = desc->constrained_intra_pred_flag;
+   params.ipicparm.weighted_pred_flag = desc->weighted_pred_flag;
+   params.ipicparm.weighted_bipred_idc = desc->weighted_bipred_idc;
+   params.iseqparm.frame_mbs_only_flag = desc->frame_mbs_only_flag;
+   params.ipicparm.transform_8x8_mode_flag = desc->transform_8x8_mode_flag;
+   params.ipicparm.chroma_qp_index_offset = desc->chroma_qp_index_offset;
+   params.ipicparm.second_chroma_qp_index_offset = desc->second_chroma_qp_index_offset;
+   params.ipicparm.pic_init_qp_minus26 = desc->pic_init_qp_minus26;
+   params.ipicparm.num_ref_idx_l0_active_minus1 = desc->num_ref_idx_l0_active_minus1;
+   params.ipicparm.num_ref_idx_l1_active_minus1 = desc->num_ref_idx_l1_active_minus1;
+   params.iseqparm.log2_max_frame_num_minus4 = desc->log2_max_frame_num_minus4;
+   params.iseqparm.pic_order_cnt_type = desc->pic_order_cnt_type;
+   params.iseqparm.log2_max_pic_order_cnt_lsb_minus4 = desc->log2_max_pic_order_cnt_lsb_minus4;
+   params.iseqparm.delta_pic_order_always_zero_flag = desc->delta_pic_order_always_zero_flag;
+   params.iseqparm.direct_8x8_inference_flag = desc->direct_8x8_inference_flag;
+   params.ipicparm.entropy_coding_mode_flag = desc->entropy_coding_mode_flag;
+   params.ipicparm.pic_order_present_flag = desc->pic_order_present_flag;
+   params.ipicparm.deblocking_filter_control_present_flag = desc->deblocking_filter_control_present_flag;
+   params.ipicparm.redundant_pic_cnt_present_flag = desc->redundant_pic_cnt_present_flag;
+
+   /* Bitstream BO layout: iparm block at 0, more_params at 0x600, slice
+    * data from 0x700 up, terminated by the end marker. */
+   memcpy(dec->bitstream->map, &params, sizeof(params));
+   for (i = 0; i < num_buffers; i++) {
+      /* NOTE(review): overrunning the bitstream area is only caught by
+       * assert() in debug builds. */
+      assert(total_bytes + num_bytes[i] < dec->bitstream->size / 2 - 0x700);
+      memcpy(dec->bitstream->map + 0x700 + total_bytes, data[i], num_bytes[i]);
+      total_bytes += num_bytes[i];
+   }
+   memcpy(dec->bitstream->map + 0x700 + total_bytes, end, sizeof(end));
+   total_bytes += sizeof(end);
+   more_params[1] = total_bytes;
+   memcpy(dec->bitstream->map + 0x600, more_params, sizeof(more_params));
+
+   PUSH_SPACE(push, 5 + 21 + 3 + 2 + 4 + 2);
+   nouveau_pushbuf_refn(push, bo_refs, sizeof(bo_refs)/sizeof(bo_refs[0]));
+
+   /* Wait for the fence = 1 */
+   BEGIN_NV04(push, SUBC_BSP(0x10), 4);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, 1);
+
+   /* TODO: Use both halves of bitstream/vpring for alternating frames */
+
+   /* Kick off the BSP */
+   BEGIN_NV04(push, SUBC_BSP(0x400), 20);
+   PUSH_DATA (push, dec->bitstream->offset >> 8);
+   PUSH_DATA (push, (dec->bitstream->offset >> 8) + 7);
+   PUSH_DATA (push, dec->bitstream->size / 2 - 0x700);
+   PUSH_DATA (push, (dec->bitstream->offset >> 8) + 6);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, dec->mbring->offset >> 8);
+   PUSH_DATA (push, dec->frame_size);
+   PUSH_DATA (push, (dec->mbring->offset + dec->frame_size) >> 8);
+   PUSH_DATA (push, dec->vpring->offset >> 8);
+   PUSH_DATA (push, dec->vpring->size / 2);
+   PUSH_DATA (push, dec->vpring_residual);
+   PUSH_DATA (push, dec->vpring_ctrl);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, dec->vpring_residual);
+   PUSH_DATA (push, dec->vpring_residual + dec->vpring_ctrl);
+   PUSH_DATA (push, dec->vpring_deblock);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual + dec->vpring_deblock) >> 8);
+   PUSH_DATA (push, 0x654321);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0x100008);
+
+   BEGIN_NV04(push, SUBC_BSP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_BSP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Write fence = 2, intr */
+   BEGIN_NV04(push, SUBC_BSP(0x610), 3);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 2);
+
+   BEGIN_NV04(push, SUBC_BSP(0x304), 1);
+   PUSH_DATA (push, 0x101);
+   PUSH_KICK (push);
+   return 0;
+}
diff --git a/src/gallium/drivers/nv50/nv84_video_vp.c b/src/gallium/drivers/nv50/nv84_video_vp.c
new file mode 100644
index 0000000..78d9a39
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video_vp.c
@@ -0,0 +1,547 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv84_video.h"
+
+#include "util/u_sse.h"
+
+/* First parameter block consumed by the VP2 H.264 firmware. Trailing
+ * hex comments give each field's byte offset within the struct; total
+ * size is pinned to 0x218 by a STATIC_ASSERT in nv84_decoder_vp_h264.
+ * Fields named unkXXX are reverse-engineered and their meaning is not
+ * known. */
+struct h264_iparm1 {
+   uint8_t scaling_lists_4x4[6][16]; // 00
+   uint8_t scaling_lists_8x8[2][64]; // 60
+   uint32_t width; // e0
+   uint32_t height; // e4
+   uint64_t ref1_addrs[16]; // e8
+   uint64_t ref2_addrs[16]; // 168
+   uint32_t unk1e8;
+   uint32_t unk1ec;
+   uint32_t w1; // 1f0
+   uint32_t w2; // 1f4
+   uint32_t w3; // 1f8
+   uint32_t h1; // 1fc
+   uint32_t h2; // 200
+   uint32_t h3; // 204
+   uint32_t unk208;
+   uint32_t field_pic_flag;
+   uint32_t format;
+   uint32_t unk214;
+};
+
+/* Second VP2 H.264 parameter block; written at offset 0x400 of
+ * dec->vp_params in nv84_decoder_vp_h264. Size is pinned to 0x38 by a
+ * STATIC_ASSERT there. */
+struct h264_iparm2 {
+   uint32_t width; // 00
+   uint32_t height; // 04
+   uint32_t mbs; // 08
+   uint32_t w1; // 0c
+   uint32_t w2; // 10
+   uint32_t w3; // 14
+   uint32_t h1; // 18
+   uint32_t h2; // 1c
+   uint32_t h3; // 20
+   uint32_t unk24;
+   uint32_t unk28;
+   uint32_t top; // 2c
+   uint32_t bottom; // 30
+   uint32_t is_reference; // 34
+};
+
+/* Run the VP2 passes that decode one H.264 picture. Fills in the two
+ * firmware parameter blocks, references every buffer the engine
+ * touches, then emits the command stream: wait for the fence to reach 2
+ * (written by the BSP stage), run VP step 1 and step 2, and finally
+ * reset the fence to 1, with an interrupt. The method/field numbers are
+ * reverse-engineered from the blob's command stream. */
+void
+nv84_decoder_vp_h264(struct nv84_decoder *dec,
+                     struct pipe_h264_picture_desc *desc,
+                     struct nv84_video_buffer *dest)
+{
+   struct h264_iparm1 param1;
+   struct h264_iparm2 param2;
+   int i, width = align(dest->base.width, 16),
+      height = align(dest->base.height, 16);
+
+   struct nouveau_pushbuf *push = dec->vp_pushbuf;
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dest->interlaced, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dest->full, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->vpring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mbring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->vp_params, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+      { dec->fence, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+   int num_refs = sizeof(bo_refs)/sizeof(*bo_refs);
+   bool is_ref = desc->is_reference;
+
+   STATIC_ASSERT(sizeof(struct h264_iparm1) == 0x218);
+   STATIC_ASSERT(sizeof(struct h264_iparm2) == 0x38);
+
+   memset(&param1, 0, sizeof(param1));
+   memset(&param2, 0, sizeof(param2));
+
+   memcpy(&param1.scaling_lists_4x4, desc->scaling_lists_4x4,
+          sizeof(param1.scaling_lists_4x4));
+   memcpy(&param1.scaling_lists_8x8, desc->scaling_lists_8x8,
+          sizeof(param1.scaling_lists_8x8));
+
+   param1.width = width;
+   param1.w1 = param1.w2 = param1.w3 = align(width, 64);
+   param1.height = param1.h2 = height;
+   param1.h1 = param1.h3 = align(height, 32);
+   param1.format = 0x3231564e; /* 'NV12' */
+   param1.field_pic_flag = desc->field_pic_flag;
+
+   param2.width = width;
+   param2.w1 = param2.w2 = param2.w3 = param1.w1;
+   if (desc->field_pic_flag)
+      param2.height = align(height, 32) / 2;
+   else
+      param2.height = height;
+   param2.h1 = param2.h2 = align(height, 32);
+   param2.h3 = height;
+   param2.mbs = width * height >> 8;
+   if (desc->field_pic_flag) {
+      param2.top = desc->bottom_field_flag ? 2 : 1;
+      param2.bottom = desc->bottom_field_flag;
+   }
+   param2.is_reference = desc->is_reference;
+
+   PUSH_SPACE(push, 5 + 16 + 3 + 2 + 6 + (is_ref ? 2 : 0) + 3 + 2 + 4 + 2);
+
+   struct nouveau_bo *ref2_default = dest->full;
+
+   /* Build the two 16-entry reference address lists. Empty slots fall
+    * back to the destination's interlaced buffer (list 1) and to
+    * ref2_default (list 2): the full-res buffer of ref slot 0 when
+    * present, otherwise the destination's. */
+   for (i = 0; i < 16; i++) {
+      struct nv84_video_buffer *buf = (struct nv84_video_buffer *)desc->ref[i];
+      struct nouveau_bo *bo1, *bo2;
+      if (buf) {
+         bo1 = buf->interlaced;
+         bo2 = buf->full;
+         if (i == 0)
+            ref2_default = buf->full;
+      } else {
+         bo1 = dest->interlaced;
+         bo2 = ref2_default;
+      }
+      param1.ref1_addrs[i] = bo1->offset;
+      param1.ref2_addrs[i] = bo2->offset;
+      struct nouveau_pushbuf_refn bo_refs[] = {
+         { bo1, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+         { bo2, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      };
+      nouveau_pushbuf_refn(push, bo_refs, sizeof(bo_refs)/sizeof(bo_refs[0]));
+   }
+
+   /* Upload both parameter blocks; the firmware reads param2 at +0x400. */
+   memcpy(dec->vp_params->map, &param1, sizeof(param1));
+   memcpy(dec->vp_params->map + 0x400, &param2, sizeof(param2));
+
+   nouveau_pushbuf_refn(push, bo_refs, num_refs);
+
+   /* Wait for BSP to have completed */
+   BEGIN_NV04(push, SUBC_VP(0x10), 4);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 2);
+   PUSH_DATA (push, 1); /* wait for sem == 2 */
+
+   /* VP step 1 */
+   BEGIN_NV04(push, SUBC_VP(0x400), 15);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, param2.mbs);
+   PUSH_DATA (push, 0x3987654); /* each nibble probably a dma index */
+   PUSH_DATA (push, 0x55001); /* constant */
+   PUSH_DATA (push, dec->vp_params->offset >> 8);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_residual) >> 8);
+   PUSH_DATA (push, dec->vpring_ctrl);
+   PUSH_DATA (push, dec->vpring->offset >> 8);
+   PUSH_DATA (push, dec->bitstream->size / 2 - 0x700);
+   PUSH_DATA (push, (dec->mbring->offset + dec->mbring->size - 0x2000) >> 8);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual + dec->vpring_deblock) >> 8);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0x100008);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* VP step 2 */
+   BEGIN_NV04(push, SUBC_VP(0x400), 5);
+   PUSH_DATA (push, 0x54530201);
+   PUSH_DATA (push, (dec->vp_params->offset >> 8) + 0x4);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual) >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+
+   /* Only reference pictures get their full-resolution buffer written. */
+   if (is_ref) {
+      BEGIN_NV04(push, SUBC_VP(0x414), 1);
+      PUSH_DATA (push, dest->full->offset >> 8);
+   }
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATAh(push, dec->vp_fw2_offset);
+   PUSH_DATA (push, dec->vp_fw2_offset);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Set the semaphore back to 1 */
+   BEGIN_NV04(push, SUBC_VP(0x610), 3);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 1);
+
+   /* Write to the semaphore location, intr */
+   BEGIN_NV04(push, SUBC_VP(0x304), 1);
+   PUSH_DATA (push, 0x101);
+
+   /* Flag both destination planes as pending GPU writes. */
+   for (i = 0; i < 2; i++) {
+      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
+      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   }
+
+   PUSH_KICK (push);
+}
+
+/* Inverse-quantize a single DCT coefficient: scale by quant/16, then
+ * for MPEG-1 force a non-zero result odd (toward zero), per the
+ * MPEG-1 mismatch-control rule, and finally saturate to the 12-bit
+ * signed range [-2048, 2047]. */
+static INLINE int16_t inverse_quantize(int16_t val, uint8_t quant, int mpeg1) {
+   int16_t ret = val * quant / 16;
+   if (mpeg1 && ret) {
+      if (ret > 0)
+         ret = (ret - 1) | 1;
+      else
+         ret = (ret + 1) | 1;
+   }
+   if (ret < -2048)
+      ret = -2048;
+   else if (ret > 2047)
+      ret = 2047;
+   return ret;
+}
+
+/* Per-macroblock record handed to the VP2 MPEG1/2 firmware; must be
+ * exactly 32 bytes (see STATIC_ASSERT in nv84_decoder_vp_mpeg12_mb).
+ * unk4/unk5 pack the macroblock type/mode bits; meaning of individual
+ * bits is reverse-engineered. */
+struct mpeg12_mb_info {
+   uint32_t index;
+   uint8_t unk4;
+   uint8_t unk5;
+   uint16_t coded_block_pattern;
+   uint8_t block_counts[6];
+   uint16_t PMV[8];
+   uint16_t skipped;
+};
+
+/* Convert one pipe_mpeg12_macroblock into the firmware's mb_info record
+ * plus a stream of (position, coefficient) 16-bit pairs appended to
+ * dec->mpeg12_data. At the BITSTREAM entrypoint the coefficients are
+ * inverse-quantized and zigzag-reordered here; otherwise (IDCT level)
+ * they are passed through untouched. Positions are stored doubled, so
+ * the low bit is free; it is set on the final pair of each block,
+ * presumably as an end-of-block marker for the firmware. */
+void
+nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
+                          struct pipe_mpeg12_picture_desc *desc,
+                          const struct pipe_mpeg12_macroblock *macrob)
+{
+   STATIC_ASSERT(sizeof(struct mpeg12_mb_info) == 32);
+
+   struct mpeg12_mb_info info = {0};
+   int i, sum = 0, mask, block_index, count;
+   const int16_t *blocks;
+   int intra = macrob->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA;
+   int motion = macrob->macroblock_type &
+      (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD);
+   const uint8_t *quant_matrix = intra ? dec->mpeg12_intra_matrix :
+      dec->mpeg12_non_intra_matrix;
+   int mpeg1 = dec->base.profile == PIPE_VIDEO_PROFILE_MPEG1;
+
+   info.index = macrob->y * mb(dec->base.width) + macrob->x;
+   info.unk4 = motion;
+   if (intra)
+      info.unk4 |= 1;
+   if (macrob->macroblock_modes.bits.dct_type)
+      info.unk4 |= 0x20;
+   info.unk5 = (macrob->motion_vertical_field_select << 4) |
+      (macrob->macroblock_modes.value & 0xf);
+   info.coded_block_pattern = macrob->coded_block_pattern;
+   if (motion) {
+      memcpy(info.PMV, macrob->PMV, sizeof(info.PMV));
+   }
+   blocks = macrob->blocks;
+   /* Walk the (up to) six 8x8 blocks selected by the coded block pattern,
+    * MSB (0x20) first. */
+   for (mask = 0x20, block_index = 0; mask > 0; mask >>= 1, block_index++) {
+      if ((macrob->coded_block_pattern & mask) == 0)
+         continue;
+
+      count = 0;
+
+      /*
+       * The observation here is that there are a lot of 0's, and things go
+       * a lot faster if one skips over them.
+       */
+
+#ifdef PIPE_ARCH_SSE
+/* This returns a 16-bit bit-mask, each 2 bits are both 1 or both 0, depending
+ * on whether the corresponding (16-bit) word in blocks is zero or non-zero. */
+#define wordmask(blocks, zero) \
+      (uint64_t)(_mm_movemask_epi8( \
+                       _mm_cmpeq_epi16( \
+                             zero, _mm_load_si128((__m128i *)(blocks)))))
+
+      __m128i zero = _mm_setzero_si128();
+
+      /* TODO: Look into doing the inverse quantization in terms of SSE
+       * operations unconditionally, when necessary. */
+      uint64_t bmask0 = wordmask(blocks, zero);
+      bmask0 |= wordmask(blocks + 8, zero) << 16;
+      bmask0 |= wordmask(blocks + 16, zero) << 32;
+      bmask0 |= wordmask(blocks + 24, zero) << 48;
+      uint64_t bmask1 = wordmask(blocks + 32, zero);
+      bmask1 |= wordmask(blocks + 40, zero) << 16;
+      bmask1 |= wordmask(blocks + 48, zero) << 32;
+      bmask1 |= wordmask(blocks + 56, zero) << 48;
+
+      /* The wordmask macro returns the inverse of what we want, since it
+       * returns a 1 for equal-to-zero. Invert. */
+      bmask0 = ~bmask0;
+      bmask1 = ~bmask1;
+
+      /* Note that the bitmask is actually sequences of 2 bits for each block
+       * index. This is because there is no movemask_epi16. That means that
+       * (a) ffs will never return 64, since the prev bit will always be set
+       * in that case, and (b) we need to do an extra bit shift. Or'ing the
+       * bitmasks together is faster than having a loop that computes them one
+       * at a time and processes them, on a Core i7-920. Trying to put bmask
+       * into an array and then looping also slows things down.
+       */
+
+      /* shift needs to be the same width as i, and unsigned so that / 2
+       * becomes a rshift operation */
+      uint32_t shift;
+      i = 0;
+
+      /* XXX avoid __builtin_ffsll as it translates into a (relatively)
+       * expensive function call on 32-bit, potentially losing out on the
+       * gains of using this approach. */
+
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         int16_t tmp;
+         while ((shift = __builtin_ffsll(bmask0))) {
+            i += (shift - 1) / 2;
+            bmask0 >>= shift - 1;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(blocks[i], quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+            bmask0 >>= 2;
+         }
+         i = 32;
+         while ((shift = __builtin_ffsll(bmask1))) {
+            i += (shift - 1) / 2;
+            bmask1 >>= shift - 1;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(blocks[i], quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+            bmask1 >>= 2;
+         }
+      } else {
+         while ((shift = __builtin_ffsll(bmask0))) {
+            i += (shift - 1) / 2;
+            bmask0 >>= shift - 1;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = blocks[i];
+            count++;
+            i++;
+            bmask0 >>= 2;
+         }
+         i = 32;
+         while ((shift = __builtin_ffsll(bmask1))) {
+            i += (shift - 1) / 2;
+            bmask1 >>= shift - 1;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = blocks[i];
+            count++;
+            i++;
+            bmask1 >>= 2;
+         }
+      }
+#undef wordmask
+#else
+
+      /*
+       * This loop looks ridiculously written... and it is. I tried a lot of
+       * different ways of achieving this scan, and this was the fastest, at
+       * least on a Core i7-920. Note that it's not necessary to skip the 0's,
+       * the firmware will deal with those just fine. But it's faster to skip
+       * them.
+       */
+      i = 0;
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         while (true) {
+            int16_t tmp;
+            // All the CPU time goes into this line.
+            while (likely(i < 64 && !(tmp = blocks[i]))) i++;
+            if (i >= 64) break;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(tmp, quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+         }
+      } else {
+         while (true) {
+            int16_t tmp;
+            // All the CPU time goes into this line.
+            while (likely(i < 64 && !(tmp = blocks[i]))) i++;
+            if (i >= 64) break;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = tmp;
+            count++;
+            i++;
+         }
+      }
+
+#endif
+
+      /* MPEG-2 mismatch control: if the coefficient sum is even, toggle
+       * the low bit of coefficient 63, appending it if it wasn't coded.
+       * NOTE(review): `sum` accumulates across all blocks of the
+       * macroblock and is never reset per block, while the MPEG-2 spec
+       * defines mismatch control per 8x8 block -- possibly related to
+       * the small MPEG2 artifacts mentioned in the commit message; worth
+       * confirming. */
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         if (!mpeg1 && (sum & 1) == 0) {
+            if (count && *(dec->mpeg12_data - 2) == 63 * 2) {
+               uint16_t *val = dec->mpeg12_data - 1;
+               if (*val & 1) *val -= 1;
+               else *val += 1;
+            } else {
+               *dec->mpeg12_data++ = 63 * 2;
+               *dec->mpeg12_data++ = 1;
+               count++;
+            }
+         }
+      }
+
+      /* Set the low bit of the last position entry (positions are even,
+       * so the bit is free); an all-zero block still emits one dummy
+       * pair so the firmware sees a terminated block. */
+      if (count) {
+         *(dec->mpeg12_data - 2) |= 1;
+      } else {
+         *dec->mpeg12_data++ = 1;
+         *dec->mpeg12_data++ = 0;
+         count = 1;
+      }
+      info.block_counts[block_index] = count;
+      blocks += 64;
+   }
+
+   memcpy(dec->mpeg12_mb_info, &info, sizeof(info));
+   dec->mpeg12_mb_info += sizeof(info);
+
+   /* Skipped macroblocks are represented by one extra record with no
+    * coefficient data, carrying the remaining skip count. */
+   if (macrob->num_skipped_macroblocks) {
+      info.index++;
+      info.coded_block_pattern = 0;
+      info.skipped = macrob->num_skipped_macroblocks - 1;
+      memset(info.block_counts, 0, sizeof(info.block_counts));
+      memcpy(dec->mpeg12_mb_info, &info, sizeof(info));
+      dec->mpeg12_mb_info += sizeof(info);
+   }
+}
+
+/* Header written at the start of dec->mpeg12_bo, ahead of the mb_info
+ * records, for the VP2 MPEG1/2 firmware. Padded to exactly 0x100 bytes
+ * (see STATIC_ASSERT in nv84_decoder_vp_mpeg12). */
+struct mpeg12_header {
+   uint32_t luma_top_size; // 00
+   uint32_t luma_bottom_size; // 04
+   uint32_t chroma_top_size; // 08
+   uint32_t mbs; // 0c
+   uint32_t mb_info_size; // 10
+   uint32_t mb_width_minus1; // 14
+   uint32_t mb_height_minus1; // 18
+   uint32_t width; // 1c
+   uint32_t height; // 20
+   uint8_t progressive; // 24
+   uint8_t mocomp_only; // 25
+   uint8_t frames; // 26
+   uint8_t picture_structure; // 27
+   uint32_t unk28; // 28 -- 0x50100
+   uint32_t unk2c; // 2c
+   uint32_t pad[4 * 13];
+};
+
+/* Submit one MPEG1/2 picture to VP2: write the mpeg12_header in front
+ * of the mb_info records already accumulated in dec->mpeg12_bo, then
+ * emit the VP command sequence. Missing references fall back to the
+ * destination buffer itself. */
+void
+nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
+                       struct pipe_mpeg12_picture_desc *desc,
+                       struct nv84_video_buffer *dest)
+{
+   struct nouveau_pushbuf *push = dec->vp_pushbuf;
+   struct nv84_video_buffer *ref1 = (struct nv84_video_buffer *)desc->ref[0];
+   struct nv84_video_buffer *ref2 = (struct nv84_video_buffer *)desc->ref[1];
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dest->interlaced, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mpeg12_bo, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+   };
+   int i, num_refs = sizeof(bo_refs) / sizeof(*bo_refs);
+   struct mpeg12_header header = {0};
+   struct nv50_miptree *y = nv50_miptree(dest->resources[0]);
+   struct nv50_miptree *uv = nv50_miptree(dest->resources[1]);
+
+   STATIC_ASSERT(sizeof(struct mpeg12_header) == 0x100);
+
+   if (ref1 == NULL)
+      ref1 = dest;
+   if (ref2 == NULL)
+      ref2 = dest;
+   bo_refs[1].bo = ref1->interlaced;
+   bo_refs[2].bo = ref2->interlaced;
+
+   header.luma_top_size = y->layer_stride;
+   header.luma_bottom_size = y->layer_stride;
+   header.chroma_top_size = uv->layer_stride;
+   header.mbs = mb(dec->base.width) * mb(dec->base.height);
+   /* Bytes of mb_info records written so far, excluding the 0x100 header. */
+   header.mb_info_size = dec->mpeg12_mb_info - dec->mpeg12_bo->map - 0x100;
+   header.mb_width_minus1 = mb(dec->base.width) - 1;
+   header.mb_height_minus1 = mb(dec->base.height) - 1;
+   header.width = align(dec->base.width, 16);
+   header.height = align(dec->base.height, 16);
+   header.progressive = desc->frame_pred_frame_dct;
+   header.frames = 1 + (desc->ref[0] != NULL) + (desc->ref[1] != NULL);
+   header.picture_structure = desc->picture_structure;
+   header.unk28 = 0x50100;
+
+   memcpy(dec->mpeg12_bo->map, &header, sizeof(header));
+
+   PUSH_SPACE(push, 10 + 3 + 2);
+
+   nouveau_pushbuf_refn(push, bo_refs, num_refs);
+
+   BEGIN_NV04(push, SUBC_VP(0x400), 9);
+   PUSH_DATA (push, 0x543210); /* each nibble possibly a dma index */
+   PUSH_DATA (push, 0x555001); /* constant */
+   PUSH_DATA (push, dec->mpeg12_bo->offset >> 8);
+   PUSH_DATA (push, (dec->mpeg12_bo->offset + 0x100) >> 8);
+   PUSH_DATA (push, (dec->mpeg12_bo->offset + 0x100 +
+                     align(0x20 * mb(dec->base.width) *
+                           mb(dec->base.height), 0x100)) >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, ref1->interlaced->offset >> 8);
+   PUSH_DATA (push, ref2->interlaced->offset >> 8);
+   PUSH_DATA (push, 6 * 64 * 8 * header.mbs);
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Flag both destination planes as pending GPU writes. */
+   for (i = 0; i < 2; i++) {
+      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
+      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   }
+   PUSH_KICK (push);
+}
-- 
1.8.1.5

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0
       [not found]     ` <1372569420-18489-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
@ 2013-07-16 21:50       ` Ilia Mirkin
  0 siblings, 0 replies; 7+ messages in thread
From: Ilia Mirkin @ 2013-07-16 21:50 UTC (permalink / raw)
  To: Maarten Lankhorst
  Cc: mesa-dev-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Adds H.264 and MPEG2 codec support via VP2, using firmware from the
blob. Acceleration is supported at the bitstream level for H.264 and
IDCT level for MPEG2.

Known issues:
 - H.264 interlaced doesn't render properly
 - H.264 shows very occasional artifacts on a small fraction of videos
 - MPEG2 + VDPAU shows frequent but small artifacts, which aren't there
   when using XvMC on the same videos

Signed-off-by: Ilia Mirkin <imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
---

The kernel support for this is upstream now (in 3.11-rc1), might be nice to
get this into Mesa 9.2.

v2 -> v3:

 - Restricted MPEG2 optimizations to x86-64 (all of which include sse2)

 - Fixed logic to be able to allow non-NV12 video surfaces when no video
   profile is specified.

v1 -> v2:

 - Replaced SSE4.2 code with SSE2 code -- it's a little faster, and works on
   all machines likely to end up with one of these cards.

 - Style cleanups suggested by Emil Velikov. Emil, let me know if there were
   things that I said I'd do but I forgot about (or if there are more things
   that you notice).


 src/gallium/drivers/nv50/Makefile.sources |   5 +-
 src/gallium/drivers/nv50/nv50_context.c   |  13 +-
 src/gallium/drivers/nv50/nv50_context.h   |  24 +
 src/gallium/drivers/nv50/nv50_miptree.c   |  27 +
 src/gallium/drivers/nv50/nv50_resource.h  |   1 +
 src/gallium/drivers/nv50/nv50_screen.c    |  13 +-
 src/gallium/drivers/nv50/nv50_winsys.h    |   4 +
 src/gallium/drivers/nv50/nv84_video.c     | 796 ++++++++++++++++++++++++++++++
 src/gallium/drivers/nv50/nv84_video.h     | 134 +++++
 src/gallium/drivers/nv50/nv84_video_bsp.c | 251 ++++++++++
 src/gallium/drivers/nv50/nv84_video_vp.c  | 550 +++++++++++++++++++++
 11 files changed, 1815 insertions(+), 3 deletions(-)
 create mode 100644 src/gallium/drivers/nv50/nv84_video.c
 create mode 100644 src/gallium/drivers/nv50/nv84_video.h
 create mode 100644 src/gallium/drivers/nv50/nv84_video_bsp.c
 create mode 100644 src/gallium/drivers/nv50/nv84_video_vp.c

diff --git a/src/gallium/drivers/nv50/Makefile.sources b/src/gallium/drivers/nv50/Makefile.sources
index 1092570..0fdac51 100644
--- a/src/gallium/drivers/nv50/Makefile.sources
+++ b/src/gallium/drivers/nv50/Makefile.sources
@@ -13,7 +13,10 @@ C_SOURCES := \
 	nv50_program.c \
 	nv50_shader_state.c \
 	nv50_push.c \
-	nv50_query.c
+	nv50_query.c \
+	nv84_video.c \
+	nv84_video_bsp.c \
+	nv84_video_vp.c
 
 CODEGEN_NV50_SOURCES := \
 	codegen/nv50_ir.cpp \
diff --git a/src/gallium/drivers/nv50/nv50_context.c b/src/gallium/drivers/nv50/nv50_context.c
index 5781c4b..79a0473 100644
--- a/src/gallium/drivers/nv50/nv50_context.c
+++ b/src/gallium/drivers/nv50/nv50_context.c
@@ -258,7 +258,18 @@ nv50_create(struct pipe_screen *pscreen, void *priv)
    draw_set_rasterize_stage(nv50->draw, nv50_draw_render_stage(nv50));
 #endif
 
-   nouveau_context_init_vdec(&nv50->base);
+   if (screen->base.device->chipset < 0x84) {
+      /* PMPEG */
+      nouveau_context_init_vdec(&nv50->base);
+   } else if (screen->base.device->chipset < 0x98 ||
+              screen->base.device->chipset == 0xa0) {
+      /* VP2 */
+      pipe->create_video_decoder = nv84_create_decoder;
+      pipe->create_video_buffer = nv84_video_buffer_create;
+   } else {
+      /* Unsupported, but need to init pointers. */
+      nouveau_context_init_vdec(&nv50->base);
+   }
 
    flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;
 
diff --git a/src/gallium/drivers/nv50/nv50_context.h b/src/gallium/drivers/nv50/nv50_context.h
index 0a83131..b204cc8 100644
--- a/src/gallium/drivers/nv50/nv50_context.h
+++ b/src/gallium/drivers/nv50/nv50_context.h
@@ -289,4 +289,28 @@ void nv50_vertex_arrays_validate(struct nv50_context *nv50);
 /* nv50_push.c */
 void nv50_push_vbo(struct nv50_context *, const struct pipe_draw_info *);
 
+/* nv84_video.c */
+struct pipe_video_decoder *
+nv84_create_decoder(struct pipe_context *context,
+                    enum pipe_video_profile profile,
+                    enum pipe_video_entrypoint entrypoint,
+                    enum pipe_video_chroma_format chroma_format,
+                    unsigned width, unsigned height,
+                    unsigned max_references,
+                    bool expect_chunked_decode);
+
+struct pipe_video_buffer *
+nv84_video_buffer_create(struct pipe_context *pipe,
+                         const struct pipe_video_buffer *template);
+
+int
+nv84_screen_get_video_param(struct pipe_screen *pscreen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param);
+
+boolean
+nv84_screen_video_supported(struct pipe_screen *screen,
+                            enum pipe_format format,
+                            enum pipe_video_profile profile);
+
 #endif
diff --git a/src/gallium/drivers/nv50/nv50_miptree.c b/src/gallium/drivers/nv50/nv50_miptree.c
index 036f1c7..28be768 100644
--- a/src/gallium/drivers/nv50/nv50_miptree.c
+++ b/src/gallium/drivers/nv50/nv50_miptree.c
@@ -239,6 +239,28 @@ nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align)
 }
 
 static void
+nv50_miptree_init_layout_video(struct nv50_miptree *mt)
+{
+   /* Compute the layout of a single-level video surface: tile_mode 0x20,
+    * a 64-byte-aligned pitch, and height rounded up to 16 rows. Arrays
+    * get their layer stride aligned to the tile size. The BO itself is
+    * allocated by the caller (see nv50_miptree_create). */
+   const struct pipe_resource *pt = &mt->base.base;
+   const unsigned blocksize = util_format_get_blocksize(pt->format);
+
+   /* Video surfaces are expected to be non-mipmapped, non-MSAA,
+    * uncompressed. */
+   assert(pt->last_level == 0);
+   assert(mt->ms_x == 0 && mt->ms_y == 0);
+   assert(!util_format_is_compressed(pt->format));
+
+   mt->layout_3d = pt->target == PIPE_TEXTURE_3D;
+
+   mt->level[0].tile_mode = 0x20;
+   mt->level[0].pitch = align(pt->width0 * blocksize, 64);
+   mt->total_size = align(pt->height0, 16) * mt->level[0].pitch * (mt->layout_3d ? pt->depth0 : 1);
+
+   if (pt->array_size > 1) {
+      mt->layer_stride = align(mt->total_size, NV50_TILE_SIZE(0x20));
+      mt->total_size = mt->layer_stride * pt->array_size;
+   }
+}
+
+static void
 nv50_miptree_init_layout_tiled(struct nv50_miptree *mt)
 {
    struct pipe_resource *pt = &mt->base.base;
@@ -311,6 +333,11 @@ nv50_miptree_create(struct pipe_screen *pscreen,
       return NULL;
    }
 
+   if (unlikely(pt->flags & NV50_RESOURCE_FLAG_VIDEO)) {
+      nv50_miptree_init_layout_video(mt);
+      /* BO allocation done by client */
+      return pt;
+   } else
    if (bo_config.nv50.memtype != 0) {
       nv50_miptree_init_layout_tiled(mt);
    } else
diff --git a/src/gallium/drivers/nv50/nv50_resource.h b/src/gallium/drivers/nv50/nv50_resource.h
index 6b92463..c520a72 100644
--- a/src/gallium/drivers/nv50/nv50_resource.h
+++ b/src/gallium/drivers/nv50/nv50_resource.h
@@ -16,6 +16,7 @@ nv50_init_resource_functions(struct pipe_context *pcontext);
 void
 nv50_screen_init_resource_functions(struct pipe_screen *pscreen);
 
+#define NV50_RESOURCE_FLAG_VIDEO (NOUVEAU_RESOURCE_FLAG_DRV_PRIV << 0)
 
 #define NV50_TILE_SHIFT_X(m) 6
 #define NV50_TILE_SHIFT_Y(m) ((((m) >> 4) & 0xf) + 2)
diff --git a/src/gallium/drivers/nv50/nv50_screen.c b/src/gallium/drivers/nv50/nv50_screen.c
index 5c57aa2..2ed85d6 100644
--- a/src/gallium/drivers/nv50/nv50_screen.c
+++ b/src/gallium/drivers/nv50/nv50_screen.c
@@ -645,7 +645,18 @@ nv50_screen_create(struct nouveau_device *dev)
 
    nv50_screen_init_resource_functions(pscreen);
 
-   nouveau_screen_init_vdec(&screen->base);
+   if (screen->base.device->chipset < 0x84) {
+      /* PMPEG */
+      nouveau_screen_init_vdec(&screen->base);
+   } else if (screen->base.device->chipset < 0x98 ||
+              screen->base.device->chipset == 0xa0) {
+      /* VP2 */
+      screen->base.base.get_video_param = nv84_screen_get_video_param;
+      screen->base.base.is_video_format_supported = nv84_screen_video_supported;
+   } else {
+      /* Unsupported, but need to init pointers. */
+      nouveau_screen_init_vdec(&screen->base);
+   }
 
    ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                         NULL, &screen->fence.bo);
diff --git a/src/gallium/drivers/nv50/nv50_winsys.h b/src/gallium/drivers/nv50/nv50_winsys.h
index 145ee70..e04247b 100644
--- a/src/gallium/drivers/nv50/nv50_winsys.h
+++ b/src/gallium/drivers/nv50/nv50_winsys.h
@@ -60,6 +60,10 @@ PUSH_REFN(struct nouveau_pushbuf *push, struct nouveau_bo *bo, uint32_t flags)
 #define SUBC_COMPUTE(m) 6, (m)
 #define NV50_COMPUTE(n) SUBC_COMPUTE(NV50_COMPUTE_##n)
 
+/* These are expected to be on their own pushbufs */
+#define SUBC_BSP(m) 2, (m)
+#define SUBC_VP(m) 2, (m)
+
 
 static INLINE uint32_t
 NV50_FIFO_PKHDR(int subc, int mthd, unsigned size)
diff --git a/src/gallium/drivers/nv50/nv84_video.c b/src/gallium/drivers/nv50/nv84_video.c
new file mode 100644
index 0000000..d5f6295
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+#include "util/u_format.h"
+#include "util/u_sampler.h"
+#include "vl/vl_zscan.h"
+
+#include "nv84_video.h"
+
+static int
+nv84_copy_firmware(const char *path, void *dest, ssize_t len)
+{
+   int fd = open(path, O_RDONLY | O_CLOEXEC);
+   ssize_t r;
+   if (fd < 0) {
+      fprintf(stderr, "opening firmware file %s failed: %m\n", path);
+      return 1;
+   }
+   r = read(fd, dest, len);
+   close(fd);
+
+   if (r != len) {
+      fprintf(stderr, "reading firmware file %s failed: %m\n", path);
+      return 1;
+   }
+
+   return 0;
+}
+
+static int
+filesize(const char *path)
+{
+   int ret;
+   struct stat statbuf;
+
+   ret = stat(path, &statbuf);
+   if (ret)
+      return ret;
+   return statbuf.st_size;
+}
+
+static struct nouveau_bo *
+nv84_load_firmwares(struct nouveau_device *dev, struct nv84_decoder *dec,
+                    const char *fw1, const char *fw2)
+{
+   int ret, size1, size2 = 0;
+   struct nouveau_bo *fw;
+
+   size1 = filesize(fw1);
+   if (fw2)
+      size2 = filesize(fw2);
+   if (size1 < 0 || size2 < 0)
+      return NULL;
+
+   dec->vp_fw2_offset = align(size1, 0x100);
+
+   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, dec->vp_fw2_offset + size2, NULL, &fw);
+   if (ret)
+      return NULL;
+   ret = nouveau_bo_map(fw, NOUVEAU_BO_WR, dec->client);
+   if (ret)
+      goto error;
+
+   ret = nv84_copy_firmware(fw1, fw->map, size1);
+   if (fw2 && !ret)
+      ret = nv84_copy_firmware(fw2, fw->map + dec->vp_fw2_offset, size2);
+   munmap(fw->map, fw->size);
+   fw->map = NULL;
+   if (!ret)
+      return fw;
+error:
+   nouveau_bo_ref(NULL, &fw);
+   return NULL;
+}
+
+static struct nouveau_bo *
+nv84_load_bsp_firmware(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec, "/lib/firmware/nouveau/nv84_bsp-h264", NULL);
+}
+
+static struct nouveau_bo *
+nv84_load_vp_firmware(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec,
+         "/lib/firmware/nouveau/nv84_vp-h264-1",
+         "/lib/firmware/nouveau/nv84_vp-h264-2");
+}
+
+static struct nouveau_bo *
+nv84_load_vp_firmware_mpeg(struct nouveau_device *dev, struct nv84_decoder *dec)
+{
+   return nv84_load_firmwares(
+         dev, dec, "/lib/firmware/nouveau/nv84_vp-mpeg12", NULL);
+}
+
+static void
+nv84_decoder_decode_bitstream_h264(struct pipe_video_decoder *decoder,
+                                   struct pipe_video_buffer *video_target,
+                                   struct pipe_picture_desc *picture,
+                                   unsigned num_buffers,
+                                   const void *const *data,
+                                   const unsigned *num_bytes)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct nv84_video_buffer *target = (struct nv84_video_buffer *)video_target;
+
+   struct pipe_h264_picture_desc *desc = (struct pipe_h264_picture_desc *)picture;
+
+   assert(target->base.buffer_format == PIPE_FORMAT_NV12);
+
+   nv84_decoder_bsp(dec, desc, num_buffers, data, num_bytes, target);
+   nv84_decoder_vp_h264(dec, desc, target);
+}
+
+static void
+nv84_decoder_flush(struct pipe_video_decoder *decoder)
+{
+}
+
+static void
+nv84_decoder_begin_frame_h264(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+}
+
+static void
+nv84_decoder_end_frame_h264(struct pipe_video_decoder *decoder,
+                            struct pipe_video_buffer *target,
+                            struct pipe_picture_desc *picture)
+{
+}
+
+static void
+nv84_decoder_decode_bitstream_mpeg12(struct pipe_video_decoder *decoder,
+                                     struct pipe_video_buffer *video_target,
+                                     struct pipe_picture_desc *picture,
+                                     unsigned num_buffers,
+                                     const void *const *data,
+                                     const unsigned *num_bytes)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+
+   assert(video_target->buffer_format == PIPE_FORMAT_NV12);
+
+   vl_mpg12_bs_decode(dec->mpeg12_bs,
+                      video_target,
+                      (struct pipe_mpeg12_picture_desc *)picture,
+                      num_buffers,
+                      data,
+                      num_bytes);
+}
+
+static void
+nv84_decoder_begin_frame_mpeg12(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
+   int i;
+
+   nouveau_bo_wait(dec->mpeg12_bo, NOUVEAU_BO_RDWR, dec->client);
+   dec->mpeg12_mb_info = dec->mpeg12_bo->map + 0x100;
+   dec->mpeg12_data = dec->mpeg12_bo->map + 0x100 +
+      align(0x20 * mb(dec->base.width) * mb(dec->base.height), 0x100);
+   if (desc->intra_matrix) {
+      dec->zscan = desc->alternate_scan ? vl_zscan_alternate : vl_zscan_normal;
+      for (i = 0; i < 64; i++) {
+         dec->mpeg12_intra_matrix[i] = desc->intra_matrix[dec->zscan[i]];
+         dec->mpeg12_non_intra_matrix[i] = desc->non_intra_matrix[dec->zscan[i]];
+      }
+      dec->mpeg12_intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
+   }
+}
+
+static void
+nv84_decoder_end_frame_mpeg12(struct pipe_video_decoder *decoder,
+                              struct pipe_video_buffer *target,
+                              struct pipe_picture_desc *picture)
+{
+   nv84_decoder_vp_mpeg12(
+         (struct nv84_decoder *)decoder,
+         (struct pipe_mpeg12_picture_desc *)picture,
+         (struct nv84_video_buffer *)target);
+}
+
+static void
+nv84_decoder_decode_macroblock(struct pipe_video_decoder *decoder,
+                               struct pipe_video_buffer *target,
+                               struct pipe_picture_desc *picture,
+                               const struct pipe_macroblock *macroblocks,
+                               unsigned num_macroblocks)
+{
+   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
+   for (int i = 0; i < num_macroblocks; i++, mb++) {
+      nv84_decoder_vp_mpeg12_mb(
+            (struct nv84_decoder *)decoder,
+            (struct pipe_mpeg12_picture_desc *)picture,
+            mb);
+   }
+}
+
+static void
+nv84_decoder_destroy(struct pipe_video_decoder *decoder)
+{
+   struct nv84_decoder *dec = (struct nv84_decoder *)decoder;
+
+   nouveau_bo_ref(NULL, &dec->bsp_fw);
+   nouveau_bo_ref(NULL, &dec->bsp_data);
+   nouveau_bo_ref(NULL, &dec->vp_fw);
+   nouveau_bo_ref(NULL, &dec->vp_data);
+   nouveau_bo_ref(NULL, &dec->mbring);
+   nouveau_bo_ref(NULL, &dec->vpring);
+   nouveau_bo_ref(NULL, &dec->bitstream);
+   nouveau_bo_ref(NULL, &dec->vp_params);
+   nouveau_bo_ref(NULL, &dec->fence);
+
+   nouveau_object_del(&dec->bsp);
+   nouveau_object_del(&dec->vp);
+
+   nouveau_bufctx_del(&dec->bsp_bufctx);
+   nouveau_pushbuf_del(&dec->bsp_pushbuf);
+   nouveau_object_del(&dec->bsp_channel);
+
+   nouveau_bufctx_del(&dec->vp_bufctx);
+   nouveau_pushbuf_del(&dec->vp_pushbuf);
+   nouveau_object_del(&dec->vp_channel);
+
+   nouveau_client_del(&dec->client);
+
+   if (dec->mpeg12_bs)
+      FREE(dec->mpeg12_bs);
+   FREE(dec);
+}
+
+struct pipe_video_decoder *
+nv84_create_decoder(struct pipe_context *context,
+                    enum pipe_video_profile profile,
+                    enum pipe_video_entrypoint entrypoint,
+                    enum pipe_video_chroma_format chroma_format,
+                    unsigned width, unsigned height,
+                    unsigned max_references,
+                    bool chunked_decode)
+{
+   struct nv50_context *nv50 = (struct nv50_context *)context;
+   struct nouveau_screen *screen = &nv50->screen->base;
+   struct nv84_decoder *dec;
+   struct nouveau_pushbuf *bsp_push, *vp_push;
+   struct nv50_surface surf;
+   struct nv50_miptree mip;
+   union pipe_color_union color;
+   struct nv04_fifo nv04_data = { .vram = 0xbeef0201, .gart = 0xbeef0202 };
+   int ret, i;
+   int is_h264 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC;
+   int is_mpeg12 = u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
+
+   if (getenv("XVMC_VL"))
+      return vl_create_decoder(context, profile, entrypoint,
+                               chroma_format, width, height,
+                               max_references, chunked_decode);
+
+   if ((is_h264 && entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM) ||
+       (is_mpeg12 && entrypoint > PIPE_VIDEO_ENTRYPOINT_IDCT)) {
+      debug_printf("%x\n", entrypoint);
+      return NULL;
+   }
+
+   if (!is_h264 && !is_mpeg12) {
+      debug_printf("invalid profile: %x\n", profile);
+      return NULL;
+   }
+
+   dec = CALLOC_STRUCT(nv84_decoder);
+   if (!dec)
+      return NULL;
+
+   dec->base.context = context;
+   dec->base.profile = profile;
+   dec->base.entrypoint = entrypoint;
+   dec->base.chroma_format = chroma_format;
+   dec->base.width = width;
+   dec->base.height = height;
+   dec->base.max_references = max_references;
+   dec->base.destroy = nv84_decoder_destroy;
+   dec->base.flush = nv84_decoder_flush;
+   if (is_h264) {
+      dec->base.decode_bitstream = nv84_decoder_decode_bitstream_h264;
+      dec->base.begin_frame = nv84_decoder_begin_frame_h264;
+      dec->base.end_frame = nv84_decoder_end_frame_h264;
+
+      dec->frame_mbs = mb(dec->base.width) * mb_half(dec->base.height) * 2;
+      dec->frame_size = dec->frame_mbs << 8;
+      dec->vpring_deblock = align(0x30 * dec->frame_mbs, 0x100);
+      dec->vpring_residual = 0x2000 + MAX2(0x32000, 0x600 * dec->frame_mbs);
+      dec->vpring_ctrl = MAX2(0x10000, align(0x1080 + 0x144 * dec->frame_mbs, 0x100));
+   } else if (is_mpeg12) {
+      dec->base.decode_macroblock = nv84_decoder_decode_macroblock;
+      dec->base.begin_frame = nv84_decoder_begin_frame_mpeg12;
+      dec->base.end_frame = nv84_decoder_end_frame_mpeg12;
+
+      if (entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         dec->mpeg12_bs = CALLOC_STRUCT(vl_mpg12_bs);
+         if (!dec->mpeg12_bs)
+            goto fail;
+         vl_mpg12_bs_init(dec->mpeg12_bs, &dec->base);
+         dec->base.decode_bitstream = nv84_decoder_decode_bitstream_mpeg12;
+      }
+   } else {
+      goto fail;
+   }
+
+   ret = nouveau_client_new(screen->device, &dec->client);
+   if (ret)
+      goto fail;
+
+   if (is_h264) {
+      ret = nouveau_object_new(&screen->device->object, 0,
+                               NOUVEAU_FIFO_CHANNEL_CLASS,
+                               &nv04_data, sizeof(nv04_data), &dec->bsp_channel);
+      if (ret)
+         goto fail;
+
+      ret = nouveau_pushbuf_new(dec->client, dec->bsp_channel, 4,
+                                32 * 1024, true, &dec->bsp_pushbuf);
+      if (ret)
+         goto fail;
+
+      ret = nouveau_bufctx_new(dec->client, 1, &dec->bsp_bufctx);
+      if (ret)
+         goto fail;
+   }
+
+   ret = nouveau_object_new(&screen->device->object, 0,
+                            NOUVEAU_FIFO_CHANNEL_CLASS,
+                            &nv04_data, sizeof(nv04_data), &dec->vp_channel);
+   if (ret)
+      goto fail;
+   ret = nouveau_pushbuf_new(dec->client, dec->vp_channel, 4,
+                             32 * 1024, true, &dec->vp_pushbuf);
+   if (ret)
+      goto fail;
+
+   ret = nouveau_bufctx_new(dec->client, 1, &dec->vp_bufctx);
+   if (ret)
+      goto fail;
+
+   bsp_push = dec->bsp_pushbuf;
+   vp_push = dec->vp_pushbuf;
+
+   if (is_h264) {
+      dec->bsp_fw = nv84_load_bsp_firmware(screen->device, dec);
+      dec->vp_fw = nv84_load_vp_firmware(screen->device, dec);
+      if (!dec->bsp_fw || !dec->vp_fw)
+         goto fail;
+   }
+   if (is_mpeg12) {
+      dec->vp_fw = nv84_load_vp_firmware_mpeg(screen->device, dec);
+      if (!dec->vp_fw)
+         goto fail;
+   }
+
+   if (is_h264) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0, 0x40000, NULL, &dec->bsp_data);
+      if (ret)
+         goto fail;
+   }
+   ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                        0, 0x40000, NULL, &dec->vp_data);
+   if (ret)
+      goto fail;
+   if (is_h264) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0,
+                           2 * (dec->vpring_deblock +
+                                dec->vpring_residual +
+                                dec->vpring_ctrl +
+                                0x1000),
+                           NULL, &dec->vpring);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP,
+                           0,
+                           (max_references + 1) * dec->frame_mbs * 0x40 +
+                           dec->frame_size + 0x2000,
+                           NULL, &dec->mbring);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0, 2 * (0x700 + MAX2(0x40000, 0x800 + 0x180 * dec->frame_mbs)),
+                           NULL, &dec->bitstream);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->bitstream, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0, 0x2000, NULL, &dec->vp_params);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->vp_params, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+   }
+   if (is_mpeg12) {
+      ret = nouveau_bo_new(screen->device, NOUVEAU_BO_GART,
+                           0,
+                           align(0x20 * mb(width) * mb(height), 0x100) +
+                           (6 * 64 * 8) * mb(width) * mb(height) + 0x100,
+                           NULL, &dec->mpeg12_bo);
+      if (ret)
+         goto fail;
+      ret = nouveau_bo_map(dec->mpeg12_bo, NOUVEAU_BO_WR, dec->client);
+      if (ret)
+         goto fail;
+   }
+
+   ret = nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM,
+                        0, 0x1000, NULL, &dec->fence);
+   if (ret)
+      goto fail;
+   ret = nouveau_bo_map(dec->fence, NOUVEAU_BO_WR, dec->client);
+   if (ret)
+      goto fail;
+   *(uint32_t *)dec->fence->map = 0;
+
+   if (is_h264) {
+      nouveau_pushbuf_bufctx(bsp_push, dec->bsp_bufctx);
+      nouveau_bufctx_refn(dec->bsp_bufctx, 0,
+                          dec->bsp_fw, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+      nouveau_bufctx_refn(dec->bsp_bufctx, 0,
+                          dec->bsp_data, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+   }
+
+   nouveau_pushbuf_bufctx(vp_push, dec->vp_bufctx);
+   nouveau_bufctx_refn(dec->vp_bufctx, 0, dec->vp_fw,
+                       NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+   nouveau_bufctx_refn(dec->vp_bufctx, 0, dec->vp_data,
+                       NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+
+   if (is_h264 && !ret)
+      ret = nouveau_object_new(dec->bsp_channel, 0xbeef74b0, 0x74b0,
+                               NULL, 0, &dec->bsp);
+
+   if (!ret)
+      ret = nouveau_object_new(dec->vp_channel, 0xbeef7476, 0x7476,
+                               NULL, 0, &dec->vp);
+
+   if (ret)
+      goto fail;
+
+
+   if (is_h264) {
+      /* Zero out some parts of mbring/vpring. there's gotta be some cleaner way
+       * of doing this... perhaps makes sense to just copy the relevant logic
+       * here. */
+      color.f[0] = color.f[1] = color.f[2] = color.f[3] = 0;
+      surf.offset = dec->frame_size;
+      surf.width = 64;
+      surf.height = (max_references + 1) * dec->frame_mbs / 4;
+      surf.depth = 1;
+      surf.base.format = PIPE_FORMAT_B8G8R8A8_UNORM;
+      surf.base.u.tex.level = 0;
+      surf.base.texture = &mip.base.base;
+      mip.level[0].tile_mode = 0;
+      mip.level[0].pitch = surf.width * 4;
+      mip.base.domain = NOUVEAU_BO_VRAM;
+      mip.base.bo = dec->mbring;
+      context->clear_render_target(context, &surf.base, &color, 0, 0, 64, 4760);
+      surf.offset = dec->vpring->size / 2 - 0x1000;
+      surf.width = 1024;
+      surf.height = 1;
+      mip.level[0].pitch = surf.width * 4;
+      mip.base.bo = dec->vpring;
+      context->clear_render_target(context, &surf.base, &color, 0, 0, 1024, 1);
+      surf.offset = dec->vpring->size - 0x1000;
+      context->clear_render_target(context, &surf.base, &color, 0, 0, 1024, 1);
+
+      PUSH_SPACE(screen->pushbuf, 5);
+      PUSH_REFN(screen->pushbuf, dec->fence, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
+      /* The clear_render_target is done via 3D engine, so use it to write to a
+       * semaphore to indicate that it's done.
+       */
+      BEGIN_NV04(screen->pushbuf, NV50_3D(QUERY_ADDRESS_HIGH), 4);
+      PUSH_DATAh(screen->pushbuf, dec->fence->offset);
+      PUSH_DATA (screen->pushbuf, dec->fence->offset);
+      PUSH_DATA (screen->pushbuf, 1);
+      PUSH_DATA (screen->pushbuf, 0xf010);
+      PUSH_KICK (screen->pushbuf);
+
+      PUSH_SPACE(bsp_push, 2 + 12 + 2 + 4 + 3);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(NV01_SUBCHAN_OBJECT), 1);
+      PUSH_DATA (bsp_push, dec->bsp->handle);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x180), 11);
+      for (i = 0; i < 11; i++)
+         PUSH_DATA(bsp_push, nv04_data.vram);
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x1b8), 1);
+      PUSH_DATA (bsp_push, nv04_data.vram);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x600), 3);
+      PUSH_DATAh(bsp_push, dec->bsp_fw->offset);
+      PUSH_DATA (bsp_push, dec->bsp_fw->offset);
+      PUSH_DATA (bsp_push, dec->bsp_fw->size);
+
+      BEGIN_NV04(bsp_push, SUBC_BSP(0x628), 2);
+      PUSH_DATA (bsp_push, dec->bsp_data->offset >> 8);
+      PUSH_DATA (bsp_push, dec->bsp_data->size);
+      PUSH_KICK (bsp_push);
+   }
+
+   PUSH_SPACE(vp_push, 2 + 12 + 2 + 4 + 3);
+
+   BEGIN_NV04(vp_push, SUBC_VP(NV01_SUBCHAN_OBJECT), 1);
+   PUSH_DATA (vp_push, dec->vp->handle);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x180), 11);
+   for (i = 0; i < 11; i++)
+      PUSH_DATA(vp_push, nv04_data.vram);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x1b8), 1);
+   PUSH_DATA (vp_push, nv04_data.vram);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x600), 3);
+   PUSH_DATAh(vp_push, dec->vp_fw->offset);
+   PUSH_DATA (vp_push, dec->vp_fw->offset);
+   PUSH_DATA (vp_push, dec->vp_fw->size);
+
+   BEGIN_NV04(vp_push, SUBC_VP(0x628), 2);
+   PUSH_DATA (vp_push, dec->vp_data->offset >> 8);
+   PUSH_DATA (vp_push, dec->vp_data->size);
+   PUSH_KICK (vp_push);
+
+   return &dec->base;
+fail:
+   nv84_decoder_destroy(&dec->base);
+   return NULL;
+}
+
+static struct pipe_sampler_view **
+nv84_video_buffer_sampler_view_planes(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   return buf->sampler_view_planes;
+}
+
+static struct pipe_sampler_view **
+nv84_video_buffer_sampler_view_components(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   return buf->sampler_view_components;
+}
+
+static struct pipe_surface **
+nv84_video_buffer_surfaces(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   return buf->surfaces;
+}
+
+static void
+nv84_video_buffer_destroy(struct pipe_video_buffer *buffer)
+{
+   struct nv84_video_buffer *buf = (struct nv84_video_buffer *)buffer;
+   unsigned i;
+
+   assert(buf);
+
+   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
+      pipe_resource_reference(&buf->resources[i], NULL);
+      pipe_sampler_view_reference(&buf->sampler_view_planes[i], NULL);
+      pipe_sampler_view_reference(&buf->sampler_view_components[i], NULL);
+      pipe_surface_reference(&buf->surfaces[i * 2], NULL);
+      pipe_surface_reference(&buf->surfaces[i * 2 + 1], NULL);
+   }
+
+   nouveau_bo_ref(NULL, &buf->interlaced);
+   nouveau_bo_ref(NULL, &buf->full);
+
+   FREE(buffer);
+}
+
+struct pipe_video_buffer *
+nv84_video_buffer_create(struct pipe_context *pipe,
+                         const struct pipe_video_buffer *template)
+{
+   struct nv84_video_buffer *buffer;
+   struct pipe_resource templ;
+   unsigned i, j, component;
+   struct pipe_sampler_view sv_templ;
+   struct pipe_surface surf_templ;
+   struct nv50_miptree *mt0, *mt1;
+   struct nouveau_bo *empty = NULL;
+   struct nouveau_screen *screen = &((struct nv50_context *)pipe)->screen->base;
+   union nouveau_bo_config cfg;
+   unsigned bo_size;
+
+   if (getenv("XVMC_VL"))
+      return vl_video_buffer_create(pipe, template);
+
+   if (!template->interlaced) {
+      debug_printf("Require interlaced video buffers\n");
+      return NULL;
+   }
+   if (template->buffer_format != PIPE_FORMAT_NV12) {
+      debug_printf("Must use NV12 format\n");
+      return NULL;
+   }
+   if (template->chroma_format != PIPE_VIDEO_CHROMA_FORMAT_420) {
+      debug_printf("Must use 4:2:0 format\n");
+      return NULL;
+   }
+
+   /*
+    * Note that there are always going to be exactly two planes, one for Y,
+    * and one for UV. These are also the resources. VP expects these to be
+    * adjacent, so they need to belong to the same BO.
+    */
+
+   buffer = CALLOC_STRUCT(nv84_video_buffer);
+   if (!buffer) return NULL;
+
+   buffer->mvidx = -1;
+
+   buffer->base.buffer_format = template->buffer_format;
+   buffer->base.context = pipe;
+   buffer->base.destroy = nv84_video_buffer_destroy;
+   buffer->base.chroma_format = template->chroma_format;
+   buffer->base.width = template->width;
+   buffer->base.height = template->height;
+   buffer->base.get_sampler_view_planes = nv84_video_buffer_sampler_view_planes;
+   buffer->base.get_sampler_view_components = nv84_video_buffer_sampler_view_components;
+   buffer->base.get_surfaces = nv84_video_buffer_surfaces;
+   buffer->base.interlaced = true;
+
+   memset(&templ, 0, sizeof(templ));
+   templ.target = PIPE_TEXTURE_2D_ARRAY;
+   templ.depth0 = 1;
+   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+   templ.format = PIPE_FORMAT_R8_UNORM;
+   templ.width0 = align(template->width, 2);
+   templ.height0 = align(template->height, 4) / 2;
+   templ.flags = NV50_RESOURCE_FLAG_VIDEO;
+   templ.array_size = 2;
+
+   cfg.nv50.tile_mode = 0x20;
+   cfg.nv50.memtype = 0x70;
+
+   buffer->resources[0] = pipe->screen->resource_create(pipe->screen, &templ);
+   if (!buffer->resources[0])
+      goto error;
+
+   templ.format = PIPE_FORMAT_R8G8_UNORM;
+   templ.width0 /= 2;
+   templ.height0 /= 2;
+   buffer->resources[1] = pipe->screen->resource_create(pipe->screen, &templ);
+   if (!buffer->resources[1])
+      goto error;
+
+   mt0 = nv50_miptree(buffer->resources[0]);
+   mt1 = nv50_miptree(buffer->resources[1]);
+
+   bo_size = mt0->total_size + mt1->total_size;
+   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
+                      bo_size, &cfg, &buffer->interlaced))
+      goto error;
+   /* XXX Change reference frame management so that this is only allocated in
+    * the decoder when necessary. */
+   if (nouveau_bo_new(screen->device, NOUVEAU_BO_VRAM | NOUVEAU_BO_NOSNOOP, 0,
+                      bo_size, &cfg, &buffer->full))
+      goto error;
+
+   mt0->base.bo = buffer->interlaced;
+   mt0->base.domain = NOUVEAU_BO_VRAM;
+   mt0->base.offset = 0;
+   mt0->base.address = buffer->interlaced->offset + mt0->base.offset;
+   nouveau_bo_ref(buffer->interlaced, &empty);
+
+   mt1->base.bo = buffer->interlaced;
+   mt1->base.domain = NOUVEAU_BO_VRAM;
+   mt1->base.offset = mt0->layer_stride * 2;
+   mt1->base.address = buffer->interlaced->offset + mt1->base.offset;
+   nouveau_bo_ref(buffer->interlaced, &empty);
+
+   memset(&sv_templ, 0, sizeof(sv_templ));
+   for (component = 0, i = 0; i < 2; ++i ) {
+      struct pipe_resource *res = buffer->resources[i];
+      unsigned nr_components = util_format_get_nr_components(res->format);
+
+      u_sampler_view_default_template(&sv_templ, res, res->format);
+      buffer->sampler_view_planes[i] =
+         pipe->create_sampler_view(pipe, res, &sv_templ);
+      if (!buffer->sampler_view_planes[i])
+         goto error;
+
+      for (j = 0; j < nr_components; ++j, ++component) {
+         sv_templ.swizzle_r = sv_templ.swizzle_g = sv_templ.swizzle_b =
+            PIPE_SWIZZLE_RED + j;
+         sv_templ.swizzle_a = PIPE_SWIZZLE_ONE;
+
+         buffer->sampler_view_components[component] =
+            pipe->create_sampler_view(pipe, res, &sv_templ);
+         if (!buffer->sampler_view_components[component])
+            goto error;
+      }
+   }
+
+   memset(&surf_templ, 0, sizeof(surf_templ));
+   for (j = 0; j < 2; ++j) {
+      surf_templ.format = buffer->resources[j]->format;
+      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 0;
+      buffer->surfaces[j * 2] =
+         pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
+      if (!buffer->surfaces[j * 2])
+         goto error;
+
+      surf_templ.u.tex.first_layer = surf_templ.u.tex.last_layer = 1;
+      buffer->surfaces[j * 2 + 1] =
+         pipe->create_surface(pipe, buffer->resources[j], &surf_templ);
+      if (!buffer->surfaces[j * 2 + 1])
+         goto error;
+   }
+
+   return &buffer->base;
+
+error:
+   nv84_video_buffer_destroy(&buffer->base);
+   return NULL;
+}
+
+int
+nv84_screen_get_video_param(struct pipe_screen *pscreen,
+                            enum pipe_video_profile profile,
+                            enum pipe_video_cap param)
+{
+   switch (param) {
+   case PIPE_VIDEO_CAP_SUPPORTED:
+      return u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG4_AVC ||
+         u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12;
+   case PIPE_VIDEO_CAP_NPOT_TEXTURES:
+      return 1;
+   case PIPE_VIDEO_CAP_MAX_WIDTH:
+   case PIPE_VIDEO_CAP_MAX_HEIGHT:
+      return 2048;
+   case PIPE_VIDEO_CAP_PREFERED_FORMAT:
+      return PIPE_FORMAT_NV12;
+   case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
+   case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
+      return true;
+   case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
+      return false;
+   default:
+      debug_printf("unknown video param: %d\n", param);
+      return 0;
+   }
+}
+
+boolean
+nv84_screen_video_supported(struct pipe_screen *screen,
+                            enum pipe_format format,
+                            enum pipe_video_profile profile)
+{
+   if (profile != PIPE_VIDEO_PROFILE_UNKNOWN)
+      return format == PIPE_FORMAT_NV12;
+
+   return vl_video_buffer_is_format_supported(screen, format, profile);
+}
diff --git a/src/gallium/drivers/nv50/nv84_video.h b/src/gallium/drivers/nv50/nv84_video.h
new file mode 100644
index 0000000..4ff8cf3
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NV84_VIDEO_H_
+#define NV84_VIDEO_H_
+
+#include "vl/vl_decoder.h"
+#include "vl/vl_video_buffer.h"
+#include "vl/vl_types.h"
+
+#include "vl/vl_mpeg12_bitstream.h"
+
+#include "util/u_video.h"
+
+#include "nv50_context.h"
+
+union pipe_desc {
+   struct pipe_picture_desc *base;
+   struct pipe_mpeg12_picture_desc *mpeg12;
+   struct pipe_mpeg4_picture_desc *mpeg4;
+   struct pipe_vc1_picture_desc *vc1;
+   struct pipe_h264_picture_desc *h264;
+};
+
+struct nv84_video_buffer {
+   struct pipe_video_buffer base;
+   struct pipe_resource *resources[VL_NUM_COMPONENTS];
+   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
+   struct pipe_sampler_view *sampler_view_components[VL_NUM_COMPONENTS];
+   struct pipe_surface *surfaces[VL_NUM_COMPONENTS * 2];
+
+   struct nouveau_bo *interlaced, *full;
+   int mvidx;
+   unsigned frame_num, frame_num_max;
+};
+
+struct nv84_decoder {
+   struct pipe_video_decoder base;
+   struct nouveau_client *client;
+   struct nouveau_object *bsp_channel, *vp_channel, *bsp, *vp;
+   struct nouveau_pushbuf *bsp_pushbuf, *vp_pushbuf;
+   struct nouveau_bufctx *bsp_bufctx, *vp_bufctx;
+
+   struct nouveau_bo *bsp_fw, *bsp_data;
+   struct nouveau_bo *vp_fw, *vp_data;
+   struct nouveau_bo *mbring, *vpring;
+
+   /*
+    * states:
+    *  0: init
+    *  1: vpring/mbring cleared, bsp is ready
+    *  2: bsp is done, vp is ready
+    * and then vp sets it back to 1
+    */
+   struct nouveau_bo *fence;
+
+   struct nouveau_bo *bitstream;
+   struct nouveau_bo *vp_params;
+
+   size_t vp_fw2_offset;
+
+   unsigned frame_mbs, frame_size;
+   /* VPRING layout:
+        RESIDUAL
+        CTRL
+        DEBLOCK
+        0x1000
+   */
+   unsigned vpring_deblock, vpring_residual, vpring_ctrl;
+
+
+   struct vl_mpg12_bs *mpeg12_bs;
+
+   struct nouveau_bo *mpeg12_bo;
+   void *mpeg12_mb_info;
+   uint16_t *mpeg12_data;
+   const int *zscan;
+   uint8_t mpeg12_intra_matrix[64];
+   uint8_t mpeg12_non_intra_matrix[64];
+};
+
+static INLINE uint32_t mb(uint32_t coord)
+{
+   return (coord + 0xf)>>4;
+}
+
+static INLINE uint32_t mb_half(uint32_t coord)
+{
+   return (coord + 0x1f)>>5;
+}
+
+int
+nv84_decoder_bsp(struct nv84_decoder *dec,
+                 struct pipe_h264_picture_desc *desc,
+                 unsigned num_buffers,
+                 const void *const *data,
+                 const unsigned *num_bytes,
+                 struct nv84_video_buffer *dest);
+
+void
+nv84_decoder_vp_h264(struct nv84_decoder *dec,
+                     struct pipe_h264_picture_desc *desc,
+                     struct nv84_video_buffer *dest);
+
+void
+nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
+                          struct pipe_mpeg12_picture_desc *desc,
+                          const struct pipe_mpeg12_macroblock *mb);
+
+void
+nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
+                       struct pipe_mpeg12_picture_desc *desc,
+                       struct nv84_video_buffer *dest);
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv84_video_bsp.c b/src/gallium/drivers/nv50/nv84_video_bsp.c
new file mode 100644
index 0000000..7885210
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video_bsp.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv84_video.h"
+
+/* Parameter block consumed by the BSP firmware; it is copied verbatim to
+ * offset 0 of the bitstream buffer (see nv84_decoder_bsp). The hex values
+ * in the trailing comments are each field's byte offset within the struct;
+ * the layout (including the pad arrays) must not be altered. */
+struct iparm {
+   struct iseqparm {
+      uint32_t chroma_format_idc; // 00
+      uint32_t pad[(0x128 - 0x4) / 4];
+      uint32_t log2_max_frame_num_minus4; // 128
+      uint32_t pic_order_cnt_type; // 12c
+      uint32_t log2_max_pic_order_cnt_lsb_minus4; // 130
+      uint32_t delta_pic_order_always_zero_flag; // 134
+      uint32_t num_ref_frames; // 138
+      uint32_t pic_width_in_mbs_minus1; // 13c
+      uint32_t pic_height_in_map_units_minus1; // 140
+      uint32_t frame_mbs_only_flag; // 144
+      uint32_t mb_adaptive_frame_field_flag; // 148
+      uint32_t direct_8x8_inference_flag; // 14c
+   } iseqparm; // 000
+   struct ipicparm {
+      uint32_t entropy_coding_mode_flag; // 00
+      uint32_t pic_order_present_flag; // 04
+      uint32_t num_slice_groups_minus1; // 08
+      uint32_t slice_group_map_type; // 0c
+      uint32_t pad1[0x60 / 4];
+      uint32_t u70; // 70
+      uint32_t u74; // 74
+      uint32_t u78; // 78
+      uint32_t num_ref_idx_l0_active_minus1; // 7c
+      uint32_t num_ref_idx_l1_active_minus1; // 80
+      uint32_t weighted_pred_flag; // 84
+      uint32_t weighted_bipred_idc; // 88
+      uint32_t pic_init_qp_minus26; // 8c
+      uint32_t chroma_qp_index_offset; // 90
+      uint32_t deblocking_filter_control_present_flag; // 94
+      uint32_t constrained_intra_pred_flag; // 98
+      uint32_t redundant_pic_cnt_present_flag; // 9c
+      uint32_t transform_8x8_mode_flag; // a0
+      uint32_t pad2[(0x1c8 - 0xa0 - 4) / 4];
+      uint32_t second_chroma_qp_index_offset; // 1c8
+      uint32_t u1cc; // 1cc
+      uint32_t curr_pic_order_cnt; // 1d0
+      uint32_t field_order_cnt[2]; // 1d4
+      uint32_t curr_mvidx; // 1dc
+      /* One entry per reference frame slot (up to 16). */
+      struct iref {
+         uint32_t u00; // 00
+         uint32_t field_is_ref; // 04 // bit0: top, bit1: bottom
+         uint8_t is_long_term; // 08
+         uint8_t non_existing; // 09
+         uint32_t frame_idx; // 0c
+         uint32_t field_order_cnt[2]; // 10
+         uint32_t mvidx; // 18
+         uint8_t field_pic_flag; // 1c
+         // 20
+      } refs[0x10]; // 1e0
+   } ipicparm; // 150
+};
+
+/* Submit one H.264 frame's slice data to the BSP engine.
+ *
+ * Bitstream buffer layout (CPU-written here):
+ *   0x000: struct iparm (sequence/picture parameters)
+ *   0x600: more_params control words (only [1], the slice byte count, is set)
+ *   0x700: concatenated slice data from the caller, followed by an
+ *          end-of-stream marker.
+ *
+ * Synchronization with the VP stage is done through dec->fence: the BSP
+ * waits for the fence value 1, then writes 2 when done; the VP later sets
+ * it back to 1 (see nv84_decoder_vp_h264). Always returns 0. */
+int
+nv84_decoder_bsp(struct nv84_decoder *dec,
+                 struct pipe_h264_picture_desc *desc,
+                 unsigned num_buffers,
+                 const void *const *data,
+                 const unsigned *num_bytes,
+                 struct nv84_video_buffer *dest)
+{
+   struct iparm params;
+   uint32_t more_params[0x44 / 4] = {0};
+   unsigned total_bytes = 0;
+   int i;
+   /* End-of-stream marker appended after the slice data. */
+   static const uint32_t end[] = {0x0b010000, 0, 0x0b010000, 0};
+   /* Tracks which mvidx slots are already taken by reference frames. */
+   char indexes[17] = {0};
+   struct nouveau_pushbuf *push = dec->bsp_pushbuf;
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dec->vpring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mbring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->bitstream, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+      { dec->fence, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+
+   /* Presumably waits for the previous frame's BSP/VP work to release the
+    * shared buffers before the CPU overwrites the bitstream buffer. */
+   nouveau_bo_wait(dec->fence, NOUVEAU_BO_RDWR, dec->client);
+
+   STATIC_ASSERT(sizeof(struct iparm) == 0x530);
+
+   memset(&params, 0, sizeof(params));
+
+   dest->frame_num = dest->frame_num_max = desc->frame_num;
+
+   for (i = 0; i < 16; i++) {
+      struct iref *ref = &params.ipicparm.refs[i];
+      struct nv84_video_buffer *frame = (struct nv84_video_buffer *)desc->ref[i];
+      if (!frame) break;
+      /* The frame index is relative to the last IDR frame. So once the frame
+       * num goes back to 0, previous reference frames need to have a negative
+       * index.
+       */
+      if (desc->frame_num >= frame->frame_num_max) {
+         frame->frame_num_max = desc->frame_num;
+      } else {
+         frame->frame_num -= frame->frame_num_max + 1;
+         frame->frame_num_max = desc->frame_num;
+      }
+      ref->non_existing = 0;
+      ref->field_is_ref = (desc->top_is_reference[i] ? 1 : 0) |
+         (desc->bottom_is_reference[i] ? 2 : 0);
+      ref->is_long_term = desc->is_long_term[i];
+      ref->field_order_cnt[0] = desc->field_order_cnt_list[i][0];
+      ref->field_order_cnt[1] = desc->field_order_cnt_list[i][1];
+      ref->frame_idx = frame->frame_num;
+      ref->u00 = ref->mvidx = frame->mvidx;
+      ref->field_pic_flag = desc->field_pic_flag;
+      indexes[frame->mvidx] = 1;
+   }
+
+   /* Needs to be adjusted if we ever support non-4:2:0 videos */
+   params.iseqparm.chroma_format_idc = 1;
+
+   params.iseqparm.pic_width_in_mbs_minus1 = mb(dec->base.width) - 1;
+   if (desc->field_pic_flag)
+      params.iseqparm.pic_height_in_map_units_minus1 = mb_half(dec->base.height) - 1;
+   else
+      params.iseqparm.pic_height_in_map_units_minus1 = mb(dec->base.height) - 1;
+
+   /* TODO: interlaced still doesn't work, maybe due to ref frame management. */
+   if (desc->bottom_field_flag)
+      params.ipicparm.curr_pic_order_cnt = desc->field_order_cnt[1];
+   else
+      params.ipicparm.curr_pic_order_cnt = desc->field_order_cnt[0];
+   params.ipicparm.field_order_cnt[0] = desc->field_order_cnt[0];
+   params.ipicparm.field_order_cnt[1] = desc->field_order_cnt[1];
+   if (desc->is_reference) {
+      /* Pick the first mvidx slot not already used by a reference frame;
+       * the assert fires if all num_ref_frames+1 slots are occupied. */
+      if (dest->mvidx < 0) {
+         for (i = 0; i < desc->num_ref_frames + 1; i++) {
+            if (!indexes[i]) {
+               dest->mvidx = i;
+               break;
+            }
+         }
+         assert(i != desc->num_ref_frames + 1);
+      }
+
+      params.ipicparm.u1cc = params.ipicparm.curr_mvidx = dest->mvidx;
+   }
+
+   params.iseqparm.num_ref_frames = desc->num_ref_frames;
+   params.iseqparm.mb_adaptive_frame_field_flag = desc->mb_adaptive_frame_field_flag;
+   params.ipicparm.constrained_intra_pred_flag = desc->constrained_intra_pred_flag;
+   params.ipicparm.weighted_pred_flag = desc->weighted_pred_flag;
+   params.ipicparm.weighted_bipred_idc = desc->weighted_bipred_idc;
+   params.iseqparm.frame_mbs_only_flag = desc->frame_mbs_only_flag;
+   params.ipicparm.transform_8x8_mode_flag = desc->transform_8x8_mode_flag;
+   params.ipicparm.chroma_qp_index_offset = desc->chroma_qp_index_offset;
+   params.ipicparm.second_chroma_qp_index_offset = desc->second_chroma_qp_index_offset;
+   params.ipicparm.pic_init_qp_minus26 = desc->pic_init_qp_minus26;
+   params.ipicparm.num_ref_idx_l0_active_minus1 = desc->num_ref_idx_l0_active_minus1;
+   params.ipicparm.num_ref_idx_l1_active_minus1 = desc->num_ref_idx_l1_active_minus1;
+   params.iseqparm.log2_max_frame_num_minus4 = desc->log2_max_frame_num_minus4;
+   params.iseqparm.pic_order_cnt_type = desc->pic_order_cnt_type;
+   params.iseqparm.log2_max_pic_order_cnt_lsb_minus4 = desc->log2_max_pic_order_cnt_lsb_minus4;
+   params.iseqparm.delta_pic_order_always_zero_flag = desc->delta_pic_order_always_zero_flag;
+   params.iseqparm.direct_8x8_inference_flag = desc->direct_8x8_inference_flag;
+   params.ipicparm.entropy_coding_mode_flag = desc->entropy_coding_mode_flag;
+   params.ipicparm.pic_order_present_flag = desc->pic_order_present_flag;
+   params.ipicparm.deblocking_filter_control_present_flag = desc->deblocking_filter_control_present_flag;
+   params.ipicparm.redundant_pic_cnt_present_flag = desc->redundant_pic_cnt_present_flag;
+
+   /* Write the parameter block, slice data and end marker into the
+    * CPU-mapped bitstream buffer. */
+   memcpy(dec->bitstream->map, &params, sizeof(params));
+   for (i = 0; i < num_buffers; i++) {
+      assert(total_bytes + num_bytes[i] < dec->bitstream->size / 2 - 0x700);
+      memcpy(dec->bitstream->map + 0x700 + total_bytes, data[i], num_bytes[i]);
+      total_bytes += num_bytes[i];
+   }
+   memcpy(dec->bitstream->map + 0x700 + total_bytes, end, sizeof(end));
+   total_bytes += sizeof(end);
+   /* more_params[1] = total slice bytes, including the end marker. */
+   more_params[1] = total_bytes;
+   memcpy(dec->bitstream->map + 0x600, more_params, sizeof(more_params));
+
+   PUSH_SPACE(push, 5 + 21 + 3 + 2 + 4 + 2);
+   nouveau_pushbuf_refn(push, bo_refs, sizeof(bo_refs)/sizeof(bo_refs[0]));
+
+   /* Wait for the fence = 1 */
+   BEGIN_NV04(push, SUBC_BSP(0x10), 4);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, 1);
+
+   /* TODO: Use both halves of bitstream/vpring for alternating frames */
+
+   /* Kick off the BSP */
+   BEGIN_NV04(push, SUBC_BSP(0x400), 20);
+   PUSH_DATA (push, dec->bitstream->offset >> 8);
+   PUSH_DATA (push, (dec->bitstream->offset >> 8) + 7);
+   PUSH_DATA (push, dec->bitstream->size / 2 - 0x700);
+   PUSH_DATA (push, (dec->bitstream->offset >> 8) + 6);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, dec->mbring->offset >> 8);
+   PUSH_DATA (push, dec->frame_size);
+   PUSH_DATA (push, (dec->mbring->offset + dec->frame_size) >> 8);
+   PUSH_DATA (push, dec->vpring->offset >> 8);
+   PUSH_DATA (push, dec->vpring->size / 2);
+   PUSH_DATA (push, dec->vpring_residual);
+   PUSH_DATA (push, dec->vpring_ctrl);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, dec->vpring_residual);
+   PUSH_DATA (push, dec->vpring_residual + dec->vpring_ctrl);
+   PUSH_DATA (push, dec->vpring_deblock);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual + dec->vpring_deblock) >> 8);
+   PUSH_DATA (push, 0x654321);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0x100008);
+
+   BEGIN_NV04(push, SUBC_BSP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_BSP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Write fence = 2, intr */
+   BEGIN_NV04(push, SUBC_BSP(0x610), 3);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 2);
+
+   BEGIN_NV04(push, SUBC_BSP(0x304), 1);
+   PUSH_DATA (push, 0x101);
+   PUSH_KICK (push);
+   return 0;
+}
diff --git a/src/gallium/drivers/nv50/nv84_video_vp.c b/src/gallium/drivers/nv50/nv84_video_vp.c
new file mode 100644
index 0000000..e968546
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv84_video_vp.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nv84_video.h"
+
+#include "util/u_sse.h"
+
+/* First VP parameter block for H.264; copied to offset 0 of vp_params.
+ * Trailing comments give field byte offsets; the layout is fixed by the
+ * firmware (STATIC_ASSERTed to 0x218 bytes in nv84_decoder_vp_h264). */
+struct h264_iparm1 {
+   uint8_t scaling_lists_4x4[6][16]; // 00
+   uint8_t scaling_lists_8x8[2][64]; // 60
+   uint32_t width; // e0
+   uint32_t height; // e4
+   uint64_t ref1_addrs[16]; // e8
+   uint64_t ref2_addrs[16]; // 168
+   uint32_t unk1e8;
+   uint32_t unk1ec;
+   uint32_t w1; // 1f0
+   uint32_t w2; // 1f4
+   uint32_t w3; // 1f8
+   uint32_t h1; // 1fc
+   uint32_t h2; // 200
+   uint32_t h3; // 204
+   uint32_t unk208;
+   uint32_t field_pic_flag;
+   uint32_t format;
+   uint32_t unk214;
+};
+
+/* Second VP parameter block for H.264; copied to offset 0x400 of vp_params.
+ * Layout fixed by the firmware (STATIC_ASSERTed to 0x38 bytes). */
+struct h264_iparm2 {
+   uint32_t width; // 00
+   uint32_t height; // 04
+   uint32_t mbs; // 08
+   uint32_t w1; // 0c
+   uint32_t w2; // 10
+   uint32_t w3; // 14
+   uint32_t h1; // 18
+   uint32_t h2; // 1c
+   uint32_t h3; // 20
+   uint32_t unk24;
+   uint32_t unk28;
+   uint32_t top; // 2c
+   uint32_t bottom; // 30
+   uint32_t is_reference; // 34
+};
+
+/* Submit the VP (motion compensation / deblocking) stage for an H.264
+ * frame. Fills the two firmware parameter blocks in dec->vp_params, then
+ * pushes a two-step VP program. Waits for the BSP fence value 2 and sets
+ * the fence back to 1 when done, completing the BSP/VP handshake
+ * (see nv84_decoder_bsp). */
+void
+nv84_decoder_vp_h264(struct nv84_decoder *dec,
+                     struct pipe_h264_picture_desc *desc,
+                     struct nv84_video_buffer *dest)
+{
+   struct h264_iparm1 param1;
+   struct h264_iparm2 param2;
+   int i, width = align(dest->base.width, 16),
+      height = align(dest->base.height, 16);
+
+   struct nouveau_pushbuf *push = dec->vp_pushbuf;
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dest->interlaced, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dest->full, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->vpring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mbring, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->vp_params, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+      { dec->fence, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+   };
+   int num_refs = sizeof(bo_refs)/sizeof(*bo_refs);
+   bool is_ref = desc->is_reference;
+
+   STATIC_ASSERT(sizeof(struct h264_iparm1) == 0x218);
+   STATIC_ASSERT(sizeof(struct h264_iparm2) == 0x38);
+
+   memset(&param1, 0, sizeof(param1));
+   memset(&param2, 0, sizeof(param2));
+
+   memcpy(&param1.scaling_lists_4x4, desc->scaling_lists_4x4,
+          sizeof(param1.scaling_lists_4x4));
+   memcpy(&param1.scaling_lists_8x8, desc->scaling_lists_8x8,
+          sizeof(param1.scaling_lists_8x8));
+
+   param1.width = width;
+   param1.w1 = param1.w2 = param1.w3 = align(width, 64);
+   param1.height = param1.h2 = height;
+   param1.h1 = param1.h3 = align(height, 32);
+   param1.format = 0x3231564e; /* 'NV12' */
+   param1.field_pic_flag = desc->field_pic_flag;
+
+   param2.width = width;
+   param2.w1 = param2.w2 = param2.w3 = param1.w1;
+   if (desc->field_pic_flag)
+      param2.height = align(height, 32) / 2;
+   else
+      param2.height = height;
+   param2.h1 = param2.h2 = align(height, 32);
+   param2.h3 = height;
+   param2.mbs = width * height >> 8;
+   if (desc->field_pic_flag) {
+      param2.top = desc->bottom_field_flag ? 2 : 1;
+      param2.bottom = desc->bottom_field_flag;
+   }
+   param2.is_reference = desc->is_reference;
+
+   PUSH_SPACE(push, 5 + 16 + 3 + 2 + 6 + (is_ref ? 2 : 0) + 3 + 2 + 4 + 2);
+
+   struct nouveau_bo *ref2_default = dest->full;
+
+   /* Fill both per-slot reference address tables. Empty slots fall back to
+    * the destination's own buffers (or ref 0's full buffer for table 2). */
+   for (i = 0; i < 16; i++) {
+      struct nv84_video_buffer *buf = (struct nv84_video_buffer *)desc->ref[i];
+      struct nouveau_bo *bo1, *bo2;
+      if (buf) {
+         bo1 = buf->interlaced;
+         bo2 = buf->full;
+         if (i == 0)
+            ref2_default = buf->full;
+      } else {
+         bo1 = dest->interlaced;
+         bo2 = ref2_default;
+      }
+      param1.ref1_addrs[i] = bo1->offset;
+      param1.ref2_addrs[i] = bo2->offset;
+      /* Renamed from bo_refs: the original shadowed the function-level
+       * bo_refs array declared above (-Wshadow). */
+      struct nouveau_pushbuf_refn frame_refs[] = {
+         { bo1, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+         { bo2, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      };
+      nouveau_pushbuf_refn(push, frame_refs, sizeof(frame_refs)/sizeof(frame_refs[0]));
+   }
+
+   memcpy(dec->vp_params->map, &param1, sizeof(param1));
+   memcpy(dec->vp_params->map + 0x400, &param2, sizeof(param2));
+
+   nouveau_pushbuf_refn(push, bo_refs, num_refs);
+
+   /* Wait for BSP to have completed */
+   BEGIN_NV04(push, SUBC_VP(0x10), 4);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 2);
+   PUSH_DATA (push, 1); /* wait for sem == 2 */
+
+   /* VP step 1 */
+   BEGIN_NV04(push, SUBC_VP(0x400), 15);
+   PUSH_DATA (push, 1);
+   PUSH_DATA (push, param2.mbs);
+   PUSH_DATA (push, 0x3987654); /* each nibble probably a dma index */
+   PUSH_DATA (push, 0x55001); /* constant */
+   PUSH_DATA (push, dec->vp_params->offset >> 8);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_residual) >> 8);
+   PUSH_DATA (push, dec->vpring_ctrl);
+   PUSH_DATA (push, dec->vpring->offset >> 8);
+   PUSH_DATA (push, dec->bitstream->size / 2 - 0x700);
+   PUSH_DATA (push, (dec->mbring->offset + dec->mbring->size - 0x2000) >> 8);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual + dec->vpring_deblock) >> 8);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0x100008);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* VP step 2 */
+   BEGIN_NV04(push, SUBC_VP(0x400), 5);
+   PUSH_DATA (push, 0x54530201);
+   PUSH_DATA (push, (dec->vp_params->offset >> 8) + 0x4);
+   PUSH_DATA (push, (dec->vpring->offset + dec->vpring_ctrl +
+                     dec->vpring_residual) >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+
+   if (is_ref) {
+      BEGIN_NV04(push, SUBC_VP(0x414), 1);
+      PUSH_DATA (push, dest->full->offset >> 8);
+   }
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATAh(push, dec->vp_fw2_offset);
+   PUSH_DATA (push, dec->vp_fw2_offset);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Set the semaphore back to 1 */
+   BEGIN_NV04(push, SUBC_VP(0x610), 3);
+   PUSH_DATAh(push, dec->fence->offset);
+   PUSH_DATA (push, dec->fence->offset);
+   PUSH_DATA (push, 1);
+
+   /* Write to the semaphore location, intr */
+   BEGIN_NV04(push, SUBC_VP(0x304), 1);
+   PUSH_DATA (push, 0x101);
+
+   /* Mark the destination planes as GPU-written so later CPU access syncs. */
+   for (i = 0; i < 2; i++) {
+      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
+      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   }
+
+   PUSH_KICK (push);
+}
+
+/* Inverse-quantize one coefficient: scale by quant/16, apply MPEG-1 "oddification"
+ * (force the result odd, toward zero), then saturate to the [-2048, 2047]
+ * range of a 12-bit signed coefficient.
+ *
+ * Fix: the intermediate is now computed in int rather than int16_t. The
+ * original truncated val * quant / 16 to int16_t *before* clamping, so the
+ * saturation was ineffective for large inputs (e.g. val = 32767, quant =
+ * 255 wraps). In-spec inputs are unaffected. */
+static INLINE int16_t inverse_quantize(int16_t val, uint8_t quant, int mpeg1) {
+   int ret = val * quant / 16;
+   if (mpeg1 && ret) {
+      if (ret > 0)
+         ret = (ret - 1) | 1;
+      else
+         ret = (ret + 1) | 1;
+   }
+   if (ret < -2048)
+      ret = -2048;
+   else if (ret > 2047)
+      ret = 2047;
+   return ret;
+}
+
+/* Per-macroblock record written into mpeg12_bo for the VP firmware; exactly
+ * 32 bytes (STATIC_ASSERTed in nv84_decoder_vp_mpeg12_mb). */
+struct mpeg12_mb_info {
+   uint32_t index;
+   uint8_t unk4;
+   uint8_t unk5;
+   uint16_t coded_block_pattern;
+   /* Number of (position, value) coefficient pairs emitted per 8x8 block. */
+   uint8_t block_counts[6];
+   uint16_t PMV[8];
+   /* Count of additional skipped macroblocks following this one. */
+   uint16_t skipped;
+};
+
+/* Convert one MPEG1/2 macroblock into the firmware's representation:
+ * a 32-byte mpeg12_mb_info record (appended at dec->mpeg12_mb_info) plus a
+ * stream of (position*2, value) int16 pairs per coded block (appended at
+ * dec->mpeg12_data). In BITSTREAM mode the coefficients are zigzag-mapped
+ * via dec->zscan and inverse-quantized here; in IDCT (XvMC) mode they are
+ * passed through as-is. The low bit of the last pair's position word marks
+ * end-of-block. */
+void
+nv84_decoder_vp_mpeg12_mb(struct nv84_decoder *dec,
+                          struct pipe_mpeg12_picture_desc *desc,
+                          const struct pipe_mpeg12_macroblock *macrob)
+{
+   STATIC_ASSERT(sizeof(struct mpeg12_mb_info) == 32);
+
+   struct mpeg12_mb_info info = {0};
+   /* NOTE(review): sum is accumulated across all blocks of the macroblock
+    * and never reset per 8x8 block, while MPEG-2 mismatch control is
+    * defined per block -- possibly related to the reported MPEG2 artifacts.
+    * Verify against ISO/IEC 13818-2 7.4.4. */
+   int i, sum = 0, mask, block_index, count;
+   const int16_t *blocks;
+   int intra = macrob->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA;
+   int motion = macrob->macroblock_type &
+      (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD);
+   const uint8_t *quant_matrix = intra ? dec->mpeg12_intra_matrix :
+      dec->mpeg12_non_intra_matrix;
+   int mpeg1 = dec->base.profile == PIPE_VIDEO_PROFILE_MPEG1;
+
+   /* Linear macroblock index within the frame. */
+   info.index = macrob->y * mb(dec->base.width) + macrob->x;
+   info.unk4 = motion;
+   if (intra)
+      info.unk4 |= 1;
+   if (macrob->macroblock_modes.bits.dct_type)
+      info.unk4 |= 0x20;
+   info.unk5 = (macrob->motion_vertical_field_select << 4) |
+      (macrob->macroblock_modes.value & 0xf);
+   info.coded_block_pattern = macrob->coded_block_pattern;
+   if (motion) {
+      memcpy(info.PMV, macrob->PMV, sizeof(info.PMV));
+   }
+   blocks = macrob->blocks;
+   /* Walk the 6 blocks (4 luma + 2 chroma); cbp bit 0x20 = first block. */
+   for (mask = 0x20, block_index = 0; mask > 0; mask >>= 1, block_index++) {
+      if ((macrob->coded_block_pattern & mask) == 0)
+         continue;
+
+      count = 0;
+
+      /*
+       * The observation here is that there are a lot of 0's, and things go
+       * a lot faster if one skips over them.
+       */
+
+#if defined(PIPE_ARCH_SSE) && defined(PIPE_ARCH_X86_64)
+/* Note that the SSE implementation is much more tuned to X86_64. As it's not
+ * benchmarked on X86_32, disable it there. I suspect that the code needs to
+ * be reorganized in terms of 32-bit wide data in order to be more
+ * efficient. NV84+ were released well into the 64-bit CPU era, so it should
+ * be a minority case.
+ */
+
+/* This returns a 16-bit bit-mask, each 2 bits are both 1 or both 0, depending
+ * on whether the corresponding (16-bit) word in blocks is zero or non-zero. */
+#define wordmask(blocks, zero) \
+      (uint64_t)(_mm_movemask_epi8( \
+                       _mm_cmpeq_epi16( \
+                             zero, _mm_load_si128((__m128i *)(blocks)))))
+
+      __m128i zero = _mm_setzero_si128();
+
+      /* TODO: Look into doing the inverse quantization in terms of SSE
+       * operations unconditionally, when necessary. */
+      uint64_t bmask0 = wordmask(blocks, zero);
+      bmask0 |= wordmask(blocks + 8, zero) << 16;
+      bmask0 |= wordmask(blocks + 16, zero) << 32;
+      bmask0 |= wordmask(blocks + 24, zero) << 48;
+      uint64_t bmask1 = wordmask(blocks + 32, zero);
+      bmask1 |= wordmask(blocks + 40, zero) << 16;
+      bmask1 |= wordmask(blocks + 48, zero) << 32;
+      bmask1 |= wordmask(blocks + 56, zero) << 48;
+
+      /* The wordmask macro returns the inverse of what we want, since it
+       * returns a 1 for equal-to-zero. Invert. */
+      bmask0 = ~bmask0;
+      bmask1 = ~bmask1;
+
+      /* Note that the bitmask is actually sequences of 2 bits for each block
+       * index. This is because there is no movemask_epi16. That means that
+       * (a) ffs will never return 64, since the prev bit will always be set
+       * in that case, and (b) we need to do an extra bit shift. Or'ing the
+       * bitmasks together is faster than having a loop that computes them one
+       * at a time and processes them, on a Core i7-920. Trying to put bmask
+       * into an array and then looping also slows things down.
+       */
+
+      /* shift needs to be the same width as i, and unsigned so that / 2
+       * becomes a rshift operation */
+      uint32_t shift;
+      i = 0;
+
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         int16_t tmp;
+         while ((shift = __builtin_ffsll(bmask0))) {
+            i += (shift - 1) / 2;
+            bmask0 >>= shift - 1;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(blocks[i], quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+            bmask0 >>= 2;
+         }
+         i = 32;
+         while ((shift = __builtin_ffsll(bmask1))) {
+            i += (shift - 1) / 2;
+            bmask1 >>= shift - 1;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(blocks[i], quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+            bmask1 >>= 2;
+         }
+      } else {
+         while ((shift = __builtin_ffsll(bmask0))) {
+            i += (shift - 1) / 2;
+            bmask0 >>= shift - 1;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = blocks[i];
+            count++;
+            i++;
+            bmask0 >>= 2;
+         }
+         i = 32;
+         while ((shift = __builtin_ffsll(bmask1))) {
+            i += (shift - 1) / 2;
+            bmask1 >>= shift - 1;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = blocks[i];
+            count++;
+            i++;
+            bmask1 >>= 2;
+         }
+      }
+#undef wordmask
+#else
+
+      /*
+       * This loop looks ridiculously written... and it is. I tried a lot of
+       * different ways of achieving this scan, and this was the fastest, at
+       * least on a Core i7-920. Note that it's not necessary to skip the 0's,
+       * the firmware will deal with those just fine. But it's faster to skip
+       * them. Note to people trying benchmarks: make sure to use realistic
+       * mpeg data, which can often be a single data point first followed by
+       * 63 0's, or <data> 7x <0> <data> 7x <0> etc.
+       */
+      i = 0;
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         while (true) {
+            int16_t tmp;
+            while (likely(i < 64 && !(tmp = blocks[i]))) i++;
+            if (i >= 64) break;
+            *dec->mpeg12_data++ = dec->zscan[i] * 2;
+            tmp = inverse_quantize(tmp, quant_matrix[i], mpeg1);
+            *dec->mpeg12_data++ = tmp;
+            sum += tmp;
+            count++;
+            i++;
+         }
+      } else {
+         while (true) {
+            int16_t tmp;
+            while (likely(i < 64 && !(tmp = blocks[i]))) i++;
+            if (i >= 64) break;
+            *dec->mpeg12_data++ = i * 2;
+            *dec->mpeg12_data++ = tmp;
+            count++;
+            i++;
+         }
+      }
+
+#endif
+
+      /* MPEG-2 mismatch control: if the coefficient sum is even, toggle the
+       * LSB of coefficient 63 (adding a (63, 1) pair if it wasn't coded). */
+      if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+         if (!mpeg1 && (sum & 1) == 0) {
+            if (count && *(dec->mpeg12_data - 2) == 63 * 2) {
+               uint16_t *val = dec->mpeg12_data - 1;
+               if (*val & 1) *val -= 1;
+               else *val += 1;
+            } else {
+               *dec->mpeg12_data++ = 63 * 2;
+               *dec->mpeg12_data++ = 1;
+               count++;
+            }
+         }
+      }
+
+      /* Terminate the block: set the end-of-block bit on the last pair's
+       * position word, emitting a dummy (0, 0) pair if the block was empty. */
+      if (count) {
+         *(dec->mpeg12_data - 2) |= 1;
+      } else {
+         *dec->mpeg12_data++ = 1;
+         *dec->mpeg12_data++ = 0;
+         count = 1;
+      }
+      info.block_counts[block_index] = count;
+      blocks += 64;
+   }
+
+   memcpy(dec->mpeg12_mb_info, &info, sizeof(info));
+   dec->mpeg12_mb_info += sizeof(info);
+
+   /* Skipped macroblocks are represented by one extra record at index+1
+    * with no coded blocks and a count of the remaining skipped mbs. */
+   if (macrob->num_skipped_macroblocks) {
+      info.index++;
+      info.coded_block_pattern = 0;
+      info.skipped = macrob->num_skipped_macroblocks - 1;
+      memset(info.block_counts, 0, sizeof(info.block_counts));
+      memcpy(dec->mpeg12_mb_info, &info, sizeof(info));
+      dec->mpeg12_mb_info += sizeof(info);
+   }
+}
+
+/* Frame-level header written at offset 0 of mpeg12_bo for the VP firmware;
+ * exactly 0x100 bytes (STATIC_ASSERTed in nv84_decoder_vp_mpeg12). */
+struct mpeg12_header {
+   uint32_t luma_top_size; // 00
+   uint32_t luma_bottom_size; // 04
+   uint32_t chroma_top_size; // 08
+   uint32_t mbs; // 0c
+   uint32_t mb_info_size; // 10
+   uint32_t mb_width_minus1; // 14
+   uint32_t mb_height_minus1; // 18
+   uint32_t width; // 1c
+   uint32_t height; // 20
+   uint8_t progressive; // 24
+   uint8_t mocomp_only; // 25
+   uint8_t frames; // 26
+   uint8_t picture_structure; // 27
+   uint32_t unk28; // 28 -- 0x50100
+   uint32_t unk2c; // 2c
+   uint32_t pad[4 * 13];
+};
+
+/* Submit the accumulated MPEG1/2 frame (header + mb_info records + coefficient
+ * data in dec->mpeg12_bo, built up by nv84_decoder_vp_mpeg12_mb) to the VP
+ * engine. Missing references fall back to the destination buffer itself. */
+void
+nv84_decoder_vp_mpeg12(struct nv84_decoder *dec,
+                       struct pipe_mpeg12_picture_desc *desc,
+                       struct nv84_video_buffer *dest)
+{
+   struct nouveau_pushbuf *push = dec->vp_pushbuf;
+   struct nv84_video_buffer *ref1 = (struct nv84_video_buffer *)desc->ref[0];
+   struct nv84_video_buffer *ref2 = (struct nv84_video_buffer *)desc->ref[1];
+   struct nouveau_pushbuf_refn bo_refs[] = {
+      { dest->interlaced, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { NULL, NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM },
+      { dec->mpeg12_bo, NOUVEAU_BO_RDWR | NOUVEAU_BO_GART },
+   };
+   int i, num_refs = sizeof(bo_refs) / sizeof(*bo_refs);
+   struct mpeg12_header header = {0};
+   struct nv50_miptree *y = nv50_miptree(dest->resources[0]);
+   struct nv50_miptree *uv = nv50_miptree(dest->resources[1]);
+
+   STATIC_ASSERT(sizeof(struct mpeg12_header) == 0x100);
+
+   /* Slots 1/2 are filled after the NULL-reference fallback below. */
+   if (ref1 == NULL)
+      ref1 = dest;
+   if (ref2 == NULL)
+      ref2 = dest;
+   bo_refs[1].bo = ref1->interlaced;
+   bo_refs[2].bo = ref2->interlaced;
+
+   header.luma_top_size = y->layer_stride;
+   header.luma_bottom_size = y->layer_stride;
+   header.chroma_top_size = uv->layer_stride;
+   header.mbs = mb(dec->base.width) * mb(dec->base.height);
+   /* mpeg12_mb_info points just past the last record written; the record
+    * area starts at +0x100 in mpeg12_bo. (void* arithmetic: GCC extension.) */
+   header.mb_info_size = dec->mpeg12_mb_info - dec->mpeg12_bo->map - 0x100;
+   header.mb_width_minus1 = mb(dec->base.width) - 1;
+   header.mb_height_minus1 = mb(dec->base.height) - 1;
+   header.width = align(dec->base.width, 16);
+   header.height = align(dec->base.height, 16);
+   header.progressive = desc->frame_pred_frame_dct;
+   header.frames = 1 + (desc->ref[0] != NULL) + (desc->ref[1] != NULL);
+   header.picture_structure = desc->picture_structure;
+   header.unk28 = 0x50100;
+
+   memcpy(dec->mpeg12_bo->map, &header, sizeof(header));
+
+   PUSH_SPACE(push, 10 + 3 + 2);
+
+   nouveau_pushbuf_refn(push, bo_refs, num_refs);
+
+   BEGIN_NV04(push, SUBC_VP(0x400), 9);
+   PUSH_DATA (push, 0x543210); /* each nibble possibly a dma index */
+   PUSH_DATA (push, 0x555001); /* constant */
+   PUSH_DATA (push, dec->mpeg12_bo->offset >> 8);
+   PUSH_DATA (push, (dec->mpeg12_bo->offset + 0x100) >> 8);
+   PUSH_DATA (push, (dec->mpeg12_bo->offset + 0x100 +
+                     align(0x20 * mb(dec->base.width) *
+                           mb(dec->base.height), 0x100)) >> 8);
+   PUSH_DATA (push, dest->interlaced->offset >> 8);
+   PUSH_DATA (push, ref1->interlaced->offset >> 8);
+   PUSH_DATA (push, ref2->interlaced->offset >> 8);
+   PUSH_DATA (push, 6 * 64 * 8 * header.mbs);
+
+   BEGIN_NV04(push, SUBC_VP(0x620), 2);
+   PUSH_DATA (push, 0);
+   PUSH_DATA (push, 0);
+
+   BEGIN_NV04(push, SUBC_VP(0x300), 1);
+   PUSH_DATA (push, 0);
+
+   /* Mark the destination planes as GPU-written so later CPU access syncs. */
+   for (i = 0; i < 2; i++) {
+      struct nv50_miptree *mt = nv50_miptree(dest->resources[i]);
+      mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
+   }
+   PUSH_KICK (push);
+}
-- 
1.8.1.5

^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2013-07-16 21:50 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-06-27 11:26 [PATCH] nv50: H.264/MPEG2 decoding support via VP2, available on NV84-NV96, NVA0 Ilia Mirkin
2013-06-29 18:07 ` [Nouveau] " Emil Velikov
2013-06-29 20:21   ` Ilia Mirkin
2013-06-30  0:33     ` Emil Velikov
2013-06-30  1:02       ` Ilia Mirkin
     [not found] ` <1372332404-16547-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
2013-06-30  5:17   ` [PATCH v2] " Ilia Mirkin
     [not found]     ` <1372569420-18489-1-git-send-email-imirkin-FrUbXkNCsVf2fBVCVOL8/A@public.gmane.org>
2013-07-16 21:50       ` [PATCH v3] " Ilia Mirkin

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.