author		Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2011-07-30 14:33:37 +0200
committer	Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2011-07-30 14:33:37 +0200
commit		febcb53ca85d911619456c09c4be49fd73c4964b (patch)
tree		12ae3a93d117b56da6e1213882f5cc6de3977adb /isp/omap3isp.c
omap3-isp-live: Initial commit
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Diffstat (limited to 'isp/omap3isp.c')
-rw-r--r--	isp/omap3isp.c	750
1 file changed, 750 insertions, 0 deletions
diff --git a/isp/omap3isp.c b/isp/omap3isp.c
new file mode 100644
index 0000000..19671c7
--- /dev/null
+++ b/isp/omap3isp.c
@@ -0,0 +1,750 @@
+/*
+ * OMAP3 ISP library - OMAP3 ISP
+ *
+ * Copyright (C) 2010-2011 Ideas on board SPRL
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This library is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at
+ * your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "controls.h"
+#include "omap3isp.h"
+#include "omap3isp-priv.h"
+#include "subdev.h"
+
+#define ENTITY_CCDC "OMAP3 ISP CCDC"
+#define ENTITY_PREVIEW "OMAP3 ISP preview"
+#define ENTITY_RESIZER "OMAP3 ISP resizer"
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static int setup_link(struct omap3_isp_device *isp, struct media_entity *source,
+ struct media_entity *sink, __u32 flags)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < source->num_links; ++i) {
+ if (source->links[i].source->entity == source &&
+ source->links[i].sink->entity == sink)
+ break;
+ }
+
+ if (i == source->num_links)
+ return -ENOENT;
+
+ ret = media_setup_link(isp->mdev, source->links[i].source,
+ source->links[i].sink, flags);
+ if (ret < 0)
+ printf("error: unable to %s %s -> %s link.\n",
+ flags & MEDIA_LNK_FL_ENABLED ? "enable" : "disable",
+ source->info.name, sink->info.name);
+
+ return ret;
+}
+
+static struct media_entity_link *entity_output_link(struct media_entity *entity)
+{
+ unsigned int i;
+
+ for (i = 0; i < entity->num_links; ++i) {
+ if (entity->links[i].source->entity == entity &&
+ entity->links[i].flags & MEDIA_LNK_FL_ENABLED)
+ return &entity->links[i];
+ }
+
+ return NULL;
+}
+
+static struct media_entity *entity_output_node(struct media_entity *entity)
+{
+ struct media_entity *node;
+ unsigned int i;
+
+ for (i = 0; i < entity->num_links; ++i) {
+ node = entity->links[i].sink->entity;
+ if (media_entity_type(node) == MEDIA_ENT_T_DEVNODE)
+ break;
+ }
+
+ if (i == entity->num_links) {
+ printf("error: unable to locate %s output video node.\n",
+ entity->info.name);
+ return NULL;
+ }
+
+ return node;
+}
+
+static __u32 mbus_to_pix(enum v4l2_mbus_pixelcode code)
+{
+ switch (code) {
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ return V4L2_PIX_FMT_SBGGR10;
+ case V4L2_MBUS_FMT_SGBRG10_1X10:
+ return V4L2_PIX_FMT_SGBRG10;
+ case V4L2_MBUS_FMT_SGRBG10_1X10:
+ return V4L2_PIX_FMT_SGRBG10;
+ case V4L2_MBUS_FMT_SRGGB10_1X10:
+ return V4L2_PIX_FMT_SRGGB10;
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ return V4L2_PIX_FMT_UYVY;
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ return V4L2_PIX_FMT_YUYV;
+ default:
+ return 0;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Open/close
+ */
+
+struct omap3_isp_device *omap3_isp_open(const char *devname,
+ const struct omap3_isp_operations *ops)
+{
+ struct omap3_isp_device *isp;
+ struct media_entity *entity;
+ unsigned int i;
+ int ret;
+
+ isp = malloc(sizeof *isp);
+ if (isp == NULL)
+ return NULL;
+
+ memset(isp, 0, sizeof *isp);
+ isp->ops = ops;
+
+ /* Open the media device and reset all links to make sure we're in a
+ * consistent, known state.
+ */
+ isp->mdev = media_open(devname, 0);
+ if (isp->mdev == NULL) {
+ printf("error: unable to open media device %s\n", devname);
+ goto error;
+ }
+
+ ret = media_reset_links(isp->mdev);
+ if (ret < 0) {
+ printf("error: unable to reset links.\n");
+ goto error;
+ }
+
+ /* Locate the entities that will be used in the pipelines. OMAP3 ISP
+ * modules are looked up by name.
+ */
+ isp->ccdc = media_get_entity_by_name(isp->mdev, ENTITY_CCDC);
+ isp->preview = media_get_entity_by_name(isp->mdev, ENTITY_PREVIEW);
+ isp->resizer = media_get_entity_by_name(isp->mdev, ENTITY_RESIZER);
+
+ if (isp->ccdc == NULL || isp->preview == NULL || isp->resizer == NULL) {
+ printf("error: unable to locate one or more ISP entities.\n");
+ goto error;
+ }
+
+ /* The sensor and video nodes are located by following links. */
+ for (i = 0; i < isp->ccdc->num_links; ++i) {
+ entity = isp->ccdc->links[i].source->entity;
+ if (media_entity_type(entity) == MEDIA_ENT_T_V4L2_SUBDEV &&
+ entity->info.pads == 1)
+ break;
+ }
+
+ if (i == isp->ccdc->num_links) {
+ printf("error: unable to locate sensor.\n");
+ goto error;
+ }
+
+ isp->sensor = entity;
+
+ isp->viewfinder.scaler = OMAP3_ISP_SCALER_ISP;
+ isp->viewfinder.subdev = isp->resizer;
+ isp->viewfinder.node = entity_output_node(isp->resizer);
+ if (isp->viewfinder.node == NULL)
+ goto error;
+
+ /* Retrieve the sensor default format. */
+ ret = v4l2_subdev_get_format(isp->sensor, &isp->sensor_format, 0,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret < 0) {
+ printf("error: unable to get sensor default format.\n");
+ goto error;
+ }
+
+ /* Setup the viewfinder pipeline. */
+ ret = setup_link(isp, isp->sensor, isp->ccdc, MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ goto error;
+
+ ret = setup_link(isp, isp->ccdc, isp->preview, MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ goto error;
+
+ ret = setup_link(isp, isp->preview, isp->resizer, MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ goto error;
+
+ ret = setup_link(isp, isp->resizer, isp->viewfinder.node,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ goto error;
+
+ ret = omap3_isp_preview_setup(isp);
+ if (ret < 0) {
+ printf("error: unable to setup preview engine.\n");
+ goto error;
+ }
+
+ return isp;
+
+error:
+ omap3_isp_close(isp);
+ return NULL;
+}
+
+void omap3_isp_close(struct omap3_isp_device *isp)
+{
+ if (isp == NULL)
+ return;
+
+ media_close(isp->mdev);
+ free(isp);
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline management
+ */
+
+static int omap3_isp_pipeline_set_format(struct omap3_isp_device *isp,
+ struct v4l2_mbus_framefmt *ofmt,
+ enum omap3_isp_scaler scaler,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt format;
+ struct media_entity_link *link;
+ struct media_entity_pad *pad;
+ struct media_entity *entity;
+ int ret;
+
+ /* Configure formats. Start from the sensor output and propagate the
+ * format through the pipeline.
+ */
+
+ /* When scaling on the ISP, select the sensor default output format.
+ * Otherwise scale as much as possible on the sensor.
+ */
+ if (scaler == OMAP3_ISP_SCALER_ISP)
+ format = isp->sensor_format;
+ else
+ format = *ofmt;
+
+ ret = v4l2_subdev_set_format(isp->sensor, &format, 0, which);
+ if (ret < 0) {
+ printf("error: set format on sensor output failed.\n");
+ return ret;
+ }
+
+ for (entity = isp->sensor; ; ) {
+ link = entity_output_link(entity);
+ if (link == NULL)
+ break;
+
+ entity = link->sink->entity;
+ if (media_entity_type(entity) == MEDIA_ENT_T_DEVNODE)
+ break;
+
+ pad = link->source;
+ ret = v4l2_subdev_get_format(pad->entity, &format, pad->index, which);
+ if (ret < 0) {
+ printf("error: get format failed on %s:%u.\n",
+ pad->entity->info.name, pad->index);
+ return ret;
+ }
+
+ /* Try to force the output format code onto the output pad. */
+ format.code = ofmt->code;
+ ret = v4l2_subdev_set_format(pad->entity, &format, pad->index, which);
+ if (ret < 0) {
+ printf("error: set format failed on %s:%u.\n",
+ pad->entity->info.name, pad->index);
+ return ret;
+ }
+
+ pad = link->sink;
+ ret = v4l2_subdev_set_format(pad->entity, &format, pad->index, which);
+ if (ret < 0) {
+ printf("error: set format failed on %s:%u.\n",
+ pad->entity->info.name, pad->index);
+ return ret;
+ }
+ }
+
+ if (ofmt == NULL)
+ return 0;
+
+ pad = link->source;
+ ret = v4l2_subdev_set_format(pad->entity, ofmt, pad->index, which);
+ if (ret < 0) {
+ printf("error: set format failed on %s:%u.\n",
+ pad->entity->info.name, pad->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Viewfinder
+ */
+
+static int omap3_isp_viewfinder_setup_pipeline(struct omap3_isp_device *isp,
+ struct v4l2_mbus_framefmt *ofmt)
+{
+ int ret;
+
+ /* Configure the formats on the pipeline. */
+ ret = omap3_isp_pipeline_set_format(isp, ofmt, isp->viewfinder.scaler,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret < 0) {
+ printf("error: unable to configure formats on pipeline.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int omap3_isp_viewfinder_setup(struct omap3_isp_device *isp,
+ struct v4l2_mbus_framefmt *ofmt)
+{
+ struct v4l2_pix_format format;
+ int ret;
+
+ /* Setup the pipeline. */
+ ret = omap3_isp_viewfinder_setup_pipeline(isp, ofmt);
+ if (ret < 0)
+ return ret;
+
+ /* Open the V4L2 device. */
+ isp->viewfinder.video = v4l2_open(isp->viewfinder.node->devname);
+ if (isp->viewfinder.video == NULL) {
+ printf("error: unable to open video capture device %s\n",
+ isp->viewfinder.node->devname);
+ return -ENXIO;
+ }
+
+ /* Set the capture format on the output video node. */
+ memset(&format, 0, sizeof format);
+ format.pixelformat = mbus_to_pix(ofmt->code);
+ format.width = ofmt->width;
+ format.height = ofmt->height;
+
+ ret = v4l2_set_format(isp->viewfinder.video, &format);
+ if (ret < 0)
+ return ret;
+
+ isp->viewfinder.format = *ofmt;
+
+ return 0;
+}
+
+int omap3_isp_viewfinder_set_pool(struct omap3_isp_device *isp,
+ struct v4l2_buffers_pool *pool)
+{
+ int ret;
+
+ /* Allocate video buffers. */
+ ret = v4l2_alloc_buffers(isp->viewfinder.video, pool, V4L2_MEMORY_USERPTR);
+ if (ret < 0) {
+ printf("error: unable to allocate buffers for viewfinder.\n");
+ return ret;
+ }
+
+ isp->viewfinder.dequeued = 0;
+ isp->viewfinder.queued = 0;
+ return 0;
+}
+
+int omap3_isp_viewfinder_set_scaler(struct omap3_isp_device *isp,
+ enum omap3_isp_scaler scaler)
+{
+ struct v4l2_mbus_framefmt format;
+ int ret;
+
+ if (isp->viewfinder.scaler == scaler)
+ return 0;
+
+ isp->viewfinder.scaler = scaler;
+
+ /* If omap3_isp_viewfinder_setup() hasn't been called yet, return now. */
+ if (isp->viewfinder.format.width == 0 ||
+ isp->viewfinder.format.height == 0)
+ return 0;
+
+ format = isp->viewfinder.format;
+ ret = omap3_isp_viewfinder_setup_pipeline(isp, &format);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void omap3_isp_viewfinder_event(void *priv)
+{
+ struct omap3_isp_device *isp = priv;
+ struct v4l2_video_buffer buffer;
+ int ret;
+
+ /* Dequeue the buffer */
+ ret = v4l2_dequeue_buffer(isp->viewfinder.video, &buffer);
+ if (ret < 0) {
+ printf("error: unable to dequeue buffer: %s (%d)\n",
+ strerror(-ret), ret);
+ return;
+ }
+
+ isp->viewfinder.dequeued |= 1 << buffer.index;
+ isp->viewfinder.queued--;
+
+ if (isp->viewfinder.queued == 0)
+ isp->ops->unwatch_fd(isp->viewfinder.video->fd);
+
+ isp->ops->viewfinder_ready(isp, &buffer);
+}
+
+int omap3_isp_viewfinder_start(struct omap3_isp_device *isp)
+{
+ struct v4l2_video_buffer buffer;
+ unsigned int i;
+ int ret;
+
+ /* Queue all buffers for video capture. */
+ for (i = 0; i < isp->viewfinder.video->nbufs; ++i) {
+ if (isp->viewfinder.dequeued & (1 << i))
+ continue;
+
+ buffer.index = i;
+
+ ret = v4l2_queue_buffer(isp->viewfinder.video, &buffer);
+ if (ret < 0) {
+ printf("error: unable to queue buffer %u (%d)\n", i, ret);
+ return -errno;
+ }
+
+ isp->viewfinder.queued++;
+ }
+
+ if (isp->ops->prepare_streamon)
+ isp->ops->prepare_streamon(isp);
+
+ /* Watch the viewfinder file descriptor. */
+ isp->ops->watch_fd(isp->viewfinder.video->fd, OMAP3_ISP_EVENT_READ,
+ omap3_isp_viewfinder_event, isp);
+
+ ret = v4l2_stream_on(isp->viewfinder.video);
+ if (ret < 0) {
+ printf("error: streamon failed for viewfinder\n");
+ return ret;
+ }
+
+ isp->viewfinder.running = true;
+ return 0;
+}
+
+int omap3_isp_viewfinder_stop(struct omap3_isp_device *isp)
+{
+ int ret;
+
+ isp->ops->unwatch_fd(isp->viewfinder.video->fd);
+
+ ret = v4l2_stream_off(isp->viewfinder.video);
+ if (ret < 0) {
+ printf("error: streamoff failed for viewfinder\n");
+ return ret;
+ }
+
+ isp->viewfinder.queued = 0;
+ isp->viewfinder.running = false;
+ return 0;
+}
+
+int omap3_isp_viewfinder_put_buffer(struct omap3_isp_device *isp,
+ struct v4l2_video_buffer *buffer)
+{
+ isp->viewfinder.dequeued &= ~(1 << buffer->index);
+
+ if (!isp->viewfinder.running)
+ return 0;
+
+ if (isp->viewfinder.queued == 0)
+ isp->ops->watch_fd(isp->viewfinder.video->fd,
+ OMAP3_ISP_EVENT_READ,
+ omap3_isp_viewfinder_event, isp);
+
+ isp->viewfinder.queued++;
+ return v4l2_queue_buffer(isp->viewfinder.video, buffer);
+}
+
+/* -----------------------------------------------------------------------------
+ * Snapshot
+ *
+ * Snapshot capture is optional. Applications using snapshot capture must call
+ * omap3_isp_snapshot_setup() before starting the viewfinder.
+ */
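+
+/* A minimal usage sketch of the snapshot API, given as an assumption about
+ * application code rather than part of this library: "/dev/media0", app_ops,
+ * vf_fmt, snap_fmt and pool are hypothetical application-side names. One
+ * plausible ordering, with the viewfinder buffer pool bound before start:
+ *
+ *   struct omap3_isp_device *isp;
+ *
+ *   isp = omap3_isp_open("/dev/media0", &app_ops);
+ *   omap3_isp_snapshot_setup(isp, &snap_fmt);
+ *   omap3_isp_viewfinder_setup(isp, &vf_fmt);
+ *   omap3_isp_viewfinder_set_pool(isp, pool);
+ *   omap3_isp_viewfinder_start(isp);
+ *   ...
+ *   omap3_isp_snapshot_capture(isp);
+ */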
+
+static int omap3_isp_snapshot_restore_pipeline(struct omap3_isp_device *isp)
+{
+ int ret;
+
+ ret = setup_link(isp, isp->snapshot.subdev, isp->snapshot.node, 0);
+ if (ret < 0)
+ return ret;
+
+ if (isp->snapshot.format.code == V4L2_MBUS_FMT_UYVY8_1X16 ||
+ isp->snapshot.format.code == V4L2_MBUS_FMT_YUYV8_1X16)
+ ret = setup_link(isp, isp->viewfinder.subdev, isp->viewfinder.node,
+ MEDIA_LNK_FL_ENABLED);
+ else
+ ret = setup_link(isp, isp->ccdc, isp->preview, MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int omap3_isp_snapshot_setup_pipeline(struct omap3_isp_device *isp,
+ struct v4l2_mbus_framefmt *ofmt,
+ enum v4l2_subdev_format_whence which)
+{
+ int ret;
+
+ /* Setup the links. */
+ if (ofmt->code == V4L2_MBUS_FMT_UYVY8_1X16 ||
+ ofmt->code == V4L2_MBUS_FMT_YUYV8_1X16)
+ ret = setup_link(isp, isp->viewfinder.subdev, isp->viewfinder.node, 0);
+ else
+ ret = setup_link(isp, isp->ccdc, isp->preview, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = setup_link(isp, isp->snapshot.subdev, isp->snapshot.node,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the formats on the pipeline. */
+ ret = omap3_isp_pipeline_set_format(isp, ofmt, isp->snapshot.scaler, which);
+ if (ret < 0) {
+ printf("error: unable to configure formats on pipeline.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int omap3_isp_snapshot_setup(struct omap3_isp_device *isp,
+ struct v4l2_mbus_framefmt *ofmt)
+{
+ struct v4l2_video_buffer buffer;
+ struct v4l2_pix_format format;
+ struct media_entity *entity;
+ unsigned int i;
+ int ret;
+
+ /* Locate the entity at the end of the pipeline. Use the CCDC for raw
+ * capture and the resizer for YUV capture.
+ */
+ switch (ofmt->code) {
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ entity = isp->resizer;
+ break;
+ default:
+ entity = isp->ccdc;
+ break;
+ }
+
+ isp->snapshot.scaler = OMAP3_ISP_SCALER_ISP;
+ isp->snapshot.subdev = entity;
+ isp->snapshot.node = entity_output_node(entity);
+ if (isp->snapshot.node == NULL)
+ return -ENOENT;
+
+ /* Try the format. */
+ ret = omap3_isp_snapshot_setup_pipeline(isp, ofmt, V4L2_SUBDEV_FORMAT_TRY);
+ if (ret < 0)
+ return ret;
+
+ isp->snapshot.format = *ofmt;
+
+ ret = omap3_isp_snapshot_restore_pipeline(isp);
+ if (ret < 0)
+ return ret;
+
+ /* Open the V4L2 device. */
+ isp->snapshot.video = v4l2_open(isp->snapshot.node->devname);
+ if (isp->snapshot.video == NULL) {
+ printf("error: unable to open snapshot capture device %s\n",
+ isp->snapshot.node->devname);
+ return -ENODEV;
+ }
+
+ /* Set the capture format on the output video node. */
+ memset(&format, 0, sizeof format);
+ format.pixelformat = mbus_to_pix(ofmt->code);
+ format.width = ofmt->width;
+ format.height = ofmt->height;
+
+ ret = v4l2_set_format(isp->snapshot.video, &format);
+ if (ret < 0)
+ return ret;
+
+ /* Pre-allocate capture buffers. */
+ isp->snapshot.pool = v4l2_buffers_pool_new(2);
+ if (isp->snapshot.pool == NULL) {
+ printf("error: unable to allocate buffers pool for snapshot.\n");
+ return -ENOMEM;
+ }
+
+ ret = v4l2_alloc_buffers(isp->snapshot.video, isp->snapshot.pool,
+ V4L2_MEMORY_MMAP);
+ if (ret < 0) {
+ printf("error: unable to allocate buffers for snapshot.\n");
+ return ret;
+ }
+
+ /* Queue all buffers. */
+ for (i = 0; i < isp->snapshot.video->nbufs; ++i) {
+ buffer.index = i;
+
+ ret = v4l2_queue_buffer(isp->snapshot.video, &buffer);
+ if (ret < 0) {
+ printf("error: unable to queue buffer %u\n", i);
+ return -errno;
+ }
+ }
+
+ return 0;
+}
+
+static void omap3_isp_snapshot_event(void *priv)
+{
+ struct omap3_isp_device *isp = priv;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_video_buffer buffer;
+ unsigned int i;
+ int ret;
+
+ /* Dequeue a buffer, stop the stream and fire the snapshot event. */
+ ret = v4l2_dequeue_buffer(isp->snapshot.video, &buffer);
+ if (ret < 0) {
+ printf("error: unable to dequeue snapshot buffer.\n");
+ return;
+ }
+
+ isp->ops->unwatch_fd(isp->snapshot.video->fd);
+
+ ret = v4l2_stream_off(isp->snapshot.video);
+ if (ret < 0) {
+ printf("error: streamoff failed for snapshot\n");
+ return;
+ }
+
+ isp->snapshot.running = false;
+
+ isp->ops->snapshot_ready(isp, &buffer);
+
+ /* Resume the viewfinder. */
+ ret = omap3_isp_snapshot_restore_pipeline(isp);
+ if (ret < 0)
+ return;
+
+ format = isp->viewfinder.format;
+ ret = omap3_isp_viewfinder_setup_pipeline(isp, &format);
+ if (ret < 0)
+ return;
+
+ ret = omap3_isp_viewfinder_start(isp);
+ if (ret < 0) {
+ printf("error: unable to resume viewfinder.\n");
+ return;
+ }
+
+ /* Queue all buffers for the next snapshot. */
+ for (i = 0; i < isp->snapshot.video->nbufs; ++i) {
+ buffer.index = i;
+
+ ret = v4l2_queue_buffer(isp->snapshot.video, &buffer);
+ if (ret < 0) {
+ printf("error: unable to queue buffer %u\n", i);
+ return;
+ }
+ }
+}
+
+int omap3_isp_snapshot_capture(struct omap3_isp_device *isp)
+{
+ struct v4l2_mbus_framefmt format;
+ int ret;
+
+ /* Suspend the viewfinder. */
+ ret = omap3_isp_viewfinder_stop(isp);
+ if (ret < 0) {
+ printf("error: unable to suspend viewfinder.\n");
+ return ret;
+ }
+
+ /* Configure the pipeline. */
+ format = isp->snapshot.format;
+ ret = omap3_isp_snapshot_setup_pipeline(isp, &format, V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret < 0) {
+ printf("error: unable to setup snapshot pipeline.\n");
+ return ret;
+ }
+
+ if (isp->ops->prepare_streamon)
+ isp->ops->prepare_streamon(isp);
+
+ /* Watch the snapshot file descriptor. */
+ isp->ops->watch_fd(isp->snapshot.video->fd, OMAP3_ISP_EVENT_READ,
+ omap3_isp_snapshot_event, isp);
+
+ ret = v4l2_stream_on(isp->snapshot.video);
+ if (ret < 0) {
+ printf("error: streamon failed for snapshot\n");
+ return ret;
+ }
+
+ isp->snapshot.running = true;
+ return 0;
+}
+
+int omap3_isp_snapshot_put_buffer(struct omap3_isp_device *isp __attribute__((__unused__)),
+ struct v4l2_video_buffer *buffer __attribute__((__unused__)))
+{
+ /* No-op, the stream is already stopped so we don't need to requeue the
+ * buffer.
+ */
+ return 0;
+}