
Android Camera Data Flow Analysis, a Full Walkthrough (overlay mode)

http://blog.chinaunix.net/uid-26215986-id-3573400.html

Why look at the overlay path at all? The camera has to move a large amount of data between the driver and the app layer. If every frame were copied from the driver up to the app (the non-overlay path), system performance would take a noticeable hit: everything slows down and power consumption rises. In preview mode we normally have no need to keep the captured frames, unlike recording mode where the data must be saved. The overlay path therefore skips the copy back up and displays the driver's frames directly; the app never gets its hands on the frame data, which is why this path is used for preview.

This analysis is based on Android 4.0. The overlay mechanism has changed a great deal since Android 2.x; if you want to study the older implementation, look into it yourself, it is not covered here.

I will pass over the startup quickly. When the camera is first opened, the app's onCreate() is called, which mainly does the following:

1. Start an openCamera thread to open the camera.

2. Instantiate the many objects the camera code will use.

3. Instantiate the SurfaceView and SurfaceHolder, and implement the surfaceChanged, surfaceDestroyed and surfaceCreated callbacks.

4. Start a preview thread for the preview process.

Items 3 and 4 are the ones that matter here: the SurfaceView instantiated above is what ultimately determines whether the overlay path is used.

Once step 3 is done, the system automatically invokes surfaceChanged. This callback fires every time the display area changes; when the camera is first opened the display area goes from nothing to something, so surfaceChanged is necessarily called here.

Let's see what it does:

public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
    // Make sure we have a surface in the holder before proceeding.
    if (holder.getSurface() == null) {
        Log.d(TAG, "holder.getSurface() == null");
        return;
    }

    Log.v(TAG, "surfaceChanged. w=" + w + ". h=" + h);

    // We need to save the holder for later use, even when the mCameraDevice
    // is null. This could happen if onResume() is invoked after this
    // function.
    mSurfaceHolder = holder;

    // The mCameraDevice will be null if it fails to connect to the camera
    // hardware. In this case we will show a dialog and then finish the
    // activity, so it's OK to ignore it.
    if (mCameraDevice == null) return;

    // Sometimes surfaceChanged is called after onPause or before onResume.
    // Ignore it.
    if (mPausing || isFinishing()) return;

    setSurfaceLayout();

    // Set preview display if the surface is being created. Preview was
    // already started. Also restart the preview if display rotation has
    // changed. Sometimes this happens when the device is held in portrait
    // and camera app is opened. Rotation animation takes some time and
    // display rotation in onCreate may not be what we want.
    if (mCameraState == PREVIEW_STOPPED) { // first time the camera is opened, so call startPreview
        startPreview(true);
        startFaceDetection();
    } else { // the display changed while the camera was already open (e.g. a portrait/landscape switch), so only the preview display needs to be reset
        if (Util.getDisplayRotation(this) != mDisplayRotation) {
            setDisplayOrientation();
        }
        if (holder.isCreating()) {
            // Set preview display if the surface is being created and preview
            // was already started. That means preview display was set to null
            // and we need to set it now.
            setPreviewDisplay(holder);
        }
    }

    // If first time initialization is not finished, send a message to do
    // it later. We want to finish surfaceChanged as soon as possible to let
    // user see preview first.
    if (!mFirstTimeInitialized) {
        mHandler.sendEmptyMessage(FIRST_TIME_INIT);
    } else {
        initializeSecondTime();
    }

    SurfaceView preview = (SurfaceView) findViewById(R.id.camera_preview);
    CameraInfo info = CameraHolder.instance().getCameraInfo()[mCameraId];
    boolean mirror = (info.facing == CameraInfo.CAMERA_FACING_FRONT);
    int displayRotation = Util.getDisplayRotation(this);
    int displayOrientation = Util.getDisplayOrientation(displayRotation, mCameraId);
    mTouchManager.initialize(preview.getHeight() / 3, preview.getHeight() / 3,
            preview, this, mirror, displayOrientation);
}

From the code above we see that setPreviewDisplay must be called whenever the surface changes. As we will see shortly, startPreview also calls setPreviewDisplay right before actually starting the preview. setPreviewDisplay performs a lot of initialization, and it is where the decision to use the overlay path is made. Let's look at startPreview first.

private void startPreview(boolean updateAll) {
    mFocusManager.resetTouchFocus();
    mCameraDevice.setErrorCallback(mErrorCallback);

    // If we're previewing already, stop the preview first (this will blank
    // the screen).
    if (mCameraState != PREVIEW_STOPPED) stopPreview();

    setPreviewDisplay(mSurfaceHolder);
    setDisplayOrientation();

    if (!mSnapshotOnIdle) {
        // If the focus mode is continuous autofocus, call cancelAutoFocus to
        // resume it because it may have been paused by autoFocus call.
        if (Parameters.FOCUS_MODE_CONTINUOUS_PICTURE.equals(mFocusManager.getFocusMode())) {
            mCameraDevice.cancelAutoFocus();
        }
        mFocusManager.setAeAwbLock(false); // Unlock AE and AWB.
    }

    if (updateAll) {
        Log.v(TAG, "Updating all parameters!");
        setCameraParameters(UPDATE_PARAM_INITIALIZE | UPDATE_PARAM_ZOOM | UPDATE_PARAM_PREFERENCE);
    } else {
        setCameraParameters(UPDATE_PARAM_MODE);
    }
    //setCameraParameters(UPDATE_PARAM_ALL);

    // Inform the main thread to go on the UI initialization.
    if (mCameraPreviewThread != null) {
        synchronized (mCameraPreviewThread) {
            mCameraPreviewThread.notify();
        }
    }

    try {
        Log.v(TAG, "startPreview");
        mCameraDevice.startPreview();
    } catch (Throwable ex) {
        closeCamera();
        throw new RuntimeException("startPreview failed", ex);
    }

    mZoomState = ZOOM_STOPPED;
    setCameraState(IDLE);
    mFocusManager.onPreviewStarted();

    if (mTempBracketingEnabled) {
        mFocusManager.setTempBracketingState(FocusManager.TempBracketingStates.ACTIVE);
    }

    if (mSnapshotOnIdle) {
        mHandler.post(mDoSnapRunnable);
    }
}

As you can see above, setPreviewDisplay is called first, and finally mCameraDevice.startPreview() starts the preview.

The call path is: app --> frameworks --> JNI --> camera client --> camera service --> hardware interface --> HAL

1. When setPreviewDisplay is called at the app layer, the parameter passed in is a SurfaceHolder.

2. By the JNI layer, the parameter passed to setPreviewDisplay has already become a Surface.

3. At the camera service layer:

    sp<IBinder> binder(surface != 0 ? surface->asBinder() : 0);
    sp<ANativeWindow> window(surface);
    return setPreviewWindow(binder, window);

    This conversion calls an overload of the same name with different parameters; by this point the arguments have become an IBinder and an ANativeWindow.

4. The hardware interface's setPreviewWindow(window) is then called with a single ANativeWindow parameter.

5. At the camerahal_module relay point the type changes once more; as the definition below shows, the parameter becomes a preview_stream_ops structure:

    int camera_set_preview_window(struct camera_device * device, struct preview_stream_ops *window)

The parameter type keeps changing along the way, but what travels from the app layer all the way down here is the same memory address, like Zhang San changing his clothes: he is still Zhang San.
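
To make the "same person, different clothes" point concrete, here is a minimal, self-contained sketch. The wrapper types below are made up for illustration (they are not the real Android classes); what it shows is that wrapping the pointer in one layer-specific type after another never changes the underlying address:

#include <cstdio>

// Hypothetical stand-ins for the per-layer wrapper types; only the wrapping changes, not the pointee.
struct AppSurfaceHolder    { void *native; };   // roughly what the app layer hands down
struct JniSurface          { void *native; };   // roughly what the JNI layer sees
struct HalPreviewStreamOps { void *native; };   // roughly what the HAL finally receives

int main() {
    int windowStorage = 0;                       // pretend this is the underlying window object
    AppSurfaceHolder    holder { &windowStorage };
    JniSurface          surface{ holder.native };
    HalPreviewStreamOps ops    { surface.native };

    // All three wrappers refer to the very same memory address.
    std::printf("app=%p jni=%p hal=%p\n", holder.native, surface.native, ops.native);
    return 0;
}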

Now let's look straight at the HAL implementation.

/**
   @brief Sets ANativeWindow object.

   Preview buffers provided to CameraHal via this object. DisplayAdapter will be interfacing with it
   to render buffers to display.

   @param[in] window The ANativeWindow object created by Surface Flinger
   @return NO_ERROR If the ANativeWindow object passes validation criteria
   @todo Define validation criteria for ANativeWindow object. Define error codes for scenarios

 */
status_t CameraHal::setPreviewWindow(struct preview_stream_ops *window)
{
    status_t ret = NO_ERROR;
    CameraAdapter::BuffersDescriptor desc;

    LOG_FUNCTION_NAME;

    mSetPreviewWindowCalled = true;

    ///If the camera service passes a null window, we destroy existing window and free the DisplayAdapter
    if (!window) // a NULL window means the overlay path is not used, so no DisplayAdapter needs to be created
    {
        if (mDisplayAdapter.get() != NULL)
        {
            ///NULL window passed, destroy the display adapter if present
            CAMHAL_LOGD("NULL window passed, destroying display adapter");
            mDisplayAdapter.clear();
            ///@remarks If there was a window previously existing, we usually expect another valid window to be passed by the client
            ///@remarks so, we will wait until it passes a valid window to begin the preview again
            mSetPreviewWindowCalled = false;
        }
        CAMHAL_LOGD("NULL ANativeWindow passed to setPreviewWindow");
        return NO_ERROR;
    } else if (mDisplayAdapter.get() == NULL) { // a non-NULL window was passed but no DisplayAdapter exists yet, so create one
        // Need to create the display adapter since it has not been created
        // Create display adapter
        mDisplayAdapter = new ANativeWindowDisplayAdapter();
        ret = NO_ERROR;
        if (!mDisplayAdapter.get() || ((ret = mDisplayAdapter->initialize()) != NO_ERROR))
        {
            if (ret != NO_ERROR)
            {
                mDisplayAdapter.clear();
                CAMHAL_LOGEA("DisplayAdapter initialize failed");
                LOG_FUNCTION_NAME_EXIT;
                return ret;
            }
            else
            {
                CAMHAL_LOGEA("Couldn't create DisplayAdapter");
                LOG_FUNCTION_NAME_EXIT;
                return NO_MEMORY;
            }
        }

        // DisplayAdapter needs to know where to get the CameraFrames from inorder to display
        // Since CameraAdapter is the one that provides the frames, set it as the frame provider for DisplayAdapter
        mDisplayAdapter->setFrameProvider(mCameraAdapter);

        // Any dynamic errors that happen during the camera use case has to be propagated back to the application
        // via CAMERA_MSG_ERROR. AppCallbackNotifier is the class that notifies such errors to the application
        // Set it as the error handler for the DisplayAdapter
        mDisplayAdapter->setErrorHandler(mAppCallbackNotifier.get());

        // Update the display adapter with the new window that is passed from CameraService
        ret = mDisplayAdapter->setPreviewWindow(window);
        if (ret != NO_ERROR)
        {
            CAMHAL_LOGEB("DisplayAdapter setPreviewWindow returned error %d", ret);
        }

        if (mPreviewStartInProgress)
        {
            CAMHAL_LOGDA("setPreviewWindow called when preview running");
            // Start the preview since the window is now available
            ret = startPreview();
        }
    } else { // a non-NULL window was passed and the DisplayAdapter already exists, so just attach the new window to it
        if ((NO_ERROR == ret) && previewEnabled()) {
            restartPreview();
        } else if (ret == ALREADY_EXISTS) {
            // ALREADY_EXISTS should be treated as a noop in this case
            ret = NO_ERROR;
        }
    }

    LOG_FUNCTION_NAME_EXIT;

    return ret;
}

Here we focus on the DisplayAdapter creation path (summarized in the sketch right after this list):

1. Instantiate an ANativeWindowDisplayAdapter object.

2. mDisplayAdapter->initialize()

3. mDisplayAdapter->setFrameProvider(mCameraAdapter) // this is the key step, we will run into it again later

4. mDisplayAdapter->setErrorHandler(mAppCallbackNotifier.get())

5. mDisplayAdapter->setPreviewWindow(window)
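
Here is a condensed sketch of that wiring order, using simplified stand-in classes rather than the real TI types; the names and interfaces below are invented for illustration. The point is that the DisplayAdapter only renders frames: it learns where frames come from through setFrameProvider and where to report errors through setErrorHandler.

#include <iostream>

// Simplified stand-ins for the HAL classes; the method names mirror the real ones,
// but these types and their interfaces are made up for the sketch.
struct FrameNotifier { };   // the CameraAdapter plays this role (it produces frames)
struct ErrorHandler  { };   // the AppCallbackNotifier plays this role (it reports errors)

struct DisplayAdapter {
    int initialize()                        { std::cout << "display adapter initialized\n"; return 0; }
    void setFrameProvider(FrameNotifier *p) { provider = p; }   // who supplies the frames to render
    void setErrorHandler(ErrorHandler *h)   { handler  = h; }   // who gets notified on errors
    int setPreviewWindow(void *window)      { win = window; return 0; }
    FrameNotifier *provider = nullptr;
    ErrorHandler  *handler  = nullptr;
    void          *win      = nullptr;
};

int main() {
    FrameNotifier cameraAdapter;        // frame source
    ErrorHandler  appCallbackNotifier;  // error sink
    int           fakeWindow = 0;       // stand-in for the window passed down from CameraService

    DisplayAdapter display;                        // 1. instantiate
    display.initialize();                          // 2. initialize
    display.setFrameProvider(&cameraAdapter);      // 3. frames come from the camera adapter
    display.setErrorHandler(&appCallbackNotifier); // 4. errors go to the app callback notifier
    display.setPreviewWindow(&fakeWindow);         // 5. attach the preview window
    return 0;
}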

Once these steps are done, startPreview comes next.

/**
   @brief Start preview mode.

   @param none
   @return NO_ERROR Camera switched to VF mode
   @todo Update function header with the different errors that are possible

 */
status_t CameraHal::startPreview() {

    // When tunneling is enabled during VTC, startPreview happens in 2 steps:
    // When the application sends the command CAMERA_CMD_PREVIEW_INITIALIZATION,
    // cameraPreviewInitialization() is called, which in turn causes the CameraAdapter
    // to move from LOADED to IDLE state. And when the application calls startPreview,
    // the CameraAdapter moves from IDLE to EXECUTING state.
    //
    // If the application calls startPreview() without sending the command
    // CAMERA_CMD_PREVIEW_INITIALIZATION, then the function cameraPreviewInitialization()
    // and startPreview() are executed. In other words, if the application calls
    // startPreview() without sending the command CAMERA_CMD_PREVIEW_INITIALIZATION,
    // then the CameraAdapter moves from LOADED to IDLE to EXECUTING state in one shot.
    status_t ret = cameraPreviewInitialization();

    // The flag mPreviewInitializationDone is set to true at the end of the function
    // cameraPreviewInitialization(). Therefore, if everything goes alright, then the
    // flag will be set. Sometimes, the function cameraPreviewInitialization() may
    // return prematurely if all the resources are not available for starting preview.
    // For example, if the preview window is not set, then it would return NO_ERROR.
    // Under such circumstances, one should return from startPreview as well and should
    // not continue execution. That is why, we check the flag and not the return value.
    if (!mPreviewInitializationDone) return ret;

    // Once startPreview is called, there is no need to continue to remember whether
    // the function cameraPreviewInitialization() was called earlier or not. And so
    // the flag mPreviewInitializationDone is reset here. Plus, this preserves the
    // current behavior of startPreview under the circumstances where the application
    // calls startPreview twice or more.
    mPreviewInitializationDone = false;

    ///Enable the display adapter if present, actual overlay enable happens when we post the buffer
    if (mDisplayAdapter.get() != NULL) {
        CAMHAL_LOGDA("Enabling display");
        int width, height;
        mParameters.getPreviewSize(&width, &height);

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS
        ret = mDisplayAdapter->enableDisplay(width, height, &mStartPreview);
#else
        ret = mDisplayAdapter->enableDisplay(width, height, NULL);
#endif

        if (ret != NO_ERROR) {
            CAMHAL_LOGEA("Couldn't enable display");

            // FIXME: At this stage mStateSwitchLock is locked and unlock is supposed to be called
            // only from mCameraAdapter->sendCommand(CameraAdapter::CAMERA_START_PREVIEW)
            // below. But this will never happen because of goto error. Thus at next
            // startPreview() call CameraHal will be deadlocked.
            // Need to revisit mStateSwitch lock, for now just abort the process.
            CAMHAL_ASSERT_X(false,
                "At this stage mCameraAdapter->mStateSwitchLock is still locked, "
                "deadlock is guaranteed");

            goto error;
        }
    }

    ///Send START_PREVIEW command to adapter
    CAMHAL_LOGDA("Starting CameraAdapter preview mode");

    ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_START_PREVIEW);

    if (ret != NO_ERROR) {
        CAMHAL_LOGEA("Couldn't start preview w/ CameraAdapter");
        goto error;
    }
    CAMHAL_LOGDA("Started preview");

    mPreviewEnabled = true;
    mPreviewStartInProgress = false;
    return ret;

    error:

        CAMHAL_LOGEA("Performing cleanup after error");

        //Do all the cleanup
        freePreviewBufs();
        mCameraAdapter->sendCommand(CameraAdapter::CAMERA_STOP_PREVIEW);
        if (mDisplayAdapter.get() != NULL) {
            mDisplayAdapter->disableDisplay(false);
        }
        mAppCallbackNotifier->stop();
        mPreviewStartInProgress = false;
        mPreviewEnabled = false;
        LOG_FUNCTION_NAME_EXIT;

        return ret;
}

The cameraPreviewInitialization() method highlighted above is also very important; it was mentioned earlier and will be explained further below where needed.

The code comment "Enable the display adapter if present, actual overlay enable happens when we post the buffer" tells us that when the display adapter is not NULL it is enabled here, and the overlay path is switched on.

Let's keep following the data and see how the frames fetched from the driver are handled. startPreview travels from CameraHal through CameraAdapter to V4LCameraAdapter and ends up in the V4L2-level startPreview; here is its implementation.

status_t V4LCameraAdapter::startPreview()
{
    status_t ret = NO_ERROR;

    Mutex::Autolock lock(mPreviewBufsLock);

    if (mPreviewing) {
        ret = BAD_VALUE;
        goto EXIT;
    }

    for (int i = 0; i < mPreviewBufferCountQueueable; i++) {
        mVideoInfo->buf.index = i;
        mVideoInfo->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        mVideoInfo->buf.memory = V4L2_MEMORY_MMAP;

        ret = v4lIoctl(mCameraHandle, VIDIOC_QBUF, &mVideoInfo->buf); // queue the buffer to the capture driver
        if (ret < 0) {
            CAMHAL_LOGEA("VIDIOC_QBUF Failed");
            goto EXIT;
        }
        nQueued++;
    }

    ret = v4lStartStreaming();

    // Create and start preview thread for receiving buffers from V4L Camera
    if (!mCapturing) {
        mPreviewThread = new PreviewThread(this); // start the preview thread
        CAMHAL_LOGDA("Created preview thread");
    }

    //Update the flag to indicate we are previewing
    mPreviewing = true;
    mCapturing = false;

EXIT:

int V4LCameraAdapter::previewThread()
{
    status_t ret = NO_ERROR;
    int width, height;
    CameraFrame frame;
    void *y_uv[2];
    int index = 0;
    int stride = 4096;
    char *fp = NULL;

    mParams.getPreviewSize(&width, &height);

    if (mPreviewing) {

        fp = this->getFrame(index);
        if (!fp) {
            ret = BAD_VALUE;
        }

        CameraBuffer *buffer = mPreviewBufs.keyAt(index);                  // get the CameraBuffer
        CameraFrame *lframe = (CameraFrame *)mFrameQueue.valueFor(buffer); // get the CameraFrame
        if (!lframe) {
            // ...
        }

        debugShowFPS();

        if (mFrameSubscribers.size() == 0) {
            // ...
        }

        y_uv[0] = (void*) lframe->mYuv[0];
        //y_uv[1] = (void*) lframe->mYuv[1];
        //y_uv[1] = (void*) (lframe->mYuv[0] + height*stride);

        convertYUV422ToNV12Tiler((unsigned char*)fp, (unsigned char*)y_uv[0], width, height); // convert the data
        CAMHAL_LOGVB("##...index= %d.;camera buffer= 0x%x; y= 0x%x; UV= 0x%x.", index, buffer, y_uv[0], y_uv[1]);

#ifdef SAVE_RAW_FRAMES
        unsigned char* nv12_buff = (unsigned char*) malloc(width*height*3/2);
        //Convert yuv422i to yuv420sp(NV12) & dump the frame to a file
        convertYUV422ToNV12((unsigned char*)fp, nv12_buff, width, height);
        saveFile(nv12_buff, ((width*height)*3/2)); // if you want to save the data, save it here
        free(nv12_buff);
#endif

        // fill in the frame structure for the data-handling path
        frame.mFrameType = CameraFrame::PREVIEW_FRAME_SYNC;
        frame.mBuffer = buffer;
        frame.mLength = width*height*3/2;
        frame.mAlignment = stride;
        frame.mOffset = 0;
        frame.mTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
        frame.mFrameMask = (unsigned int)CameraFrame::PREVIEW_FRAME_SYNC;

        if (mRecording)
        {
            frame.mFrameMask |= (unsigned int)CameraFrame::VIDEO_FRAME_SYNC;
            mFramesWithEncoder++;
        }

        // this is the key point: whether the data is passed back via callbacks
        // or displayed through the overlay path is decided by the calls below
        ret = setInitFrameRefCount(frame.mBuffer, frame.mFrameMask);
        if (ret != NO_ERROR) {
            CAMHAL_LOGDB("Error in setInitFrameRefCount %d", ret);
        } else {
            ret = sendFrameToSubscribers(&frame);
        }

Now let's see what setInitFrameRefCount does.

int BaseCameraAdapter::setInitFrameRefCount(CameraBuffer * buf, unsigned int mask)
{
  int ret = NO_ERROR;
  unsigned int lmask;

  LOG_FUNCTION_NAME;

  if (buf == NULL)
  {
      return -EINVAL;
  }

  for (lmask = 1; lmask < CameraFrame::ALL_FRAMES; lmask <<= 1) {
    if (lmask & mask) {
      switch (lmask) {

      case CameraFrame::IMAGE_FRAME:
          setFrameRefCount(buf, CameraFrame::IMAGE_FRAME, (int) mImageSubscribers.size());
        break;
      case CameraFrame::RAW_FRAME:
          setFrameRefCount(buf, CameraFrame::RAW_FRAME, mRawSubscribers.size());
        break;
      case CameraFrame::PREVIEW_FRAME_SYNC:
          setFrameRefCount(buf, CameraFrame::PREVIEW_FRAME_SYNC, mFrameSubscribers.size()); // each key in mFrameSubscribers holds the corresponding callback
        break;
      case CameraFrame::SNAPSHOT_FRAME:
          setFrameRefCount(buf, CameraFrame::SNAPSHOT_FRAME, mSnapshotSubscribers.size());
        break;
      case CameraFrame::VIDEO_FRAME_SYNC:
          setFrameRefCount(buf, CameraFrame::VIDEO_FRAME_SYNC, mVideoSubscribers.size());
        break;
      case CameraFrame::FRAME_DATA_SYNC:
          setFrameRefCount(buf, CameraFrame::FRAME_DATA_SYNC, mFrameDataSubscribers.size());
        break;
      case CameraFrame::REPROCESS_INPUT_FRAME:
          setFrameRefCount(buf, CameraFrame::REPROCESS_INPUT_FRAME, mVideoInSubscribers.size());
        break;
      default:
        CAMHAL_LOGEB("FrameType not supported 0x%x", lmask);
        break;
      }//SWITCH
      mask &= ~lmask;
    }//IF
  }//FOR

  LOG_FUNCTION_NAME_EXIT;

  return ret;
}

The line I highlighted above works because enableMsgType has previously done mFrameSubscribers.add(...), adding the callback under the corresponding key and thus establishing the association. Symmetrically, disableMsgType does mFrameSubscribers.removeItem(...). Where exactly enableMsgType and disableMsgType are called will be explained a bit later.
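
A minimal sketch of this subscriber mechanism, assuming std::map in place of Android's KeyedVector and a plain function pointer for frame_callback (the names below are illustrative, not the real TI API):

#include <cstdio>
#include <map>

struct CameraFrame { int type; void *buffer; };
typedef void (*frame_callback)(CameraFrame *frame);

// One registry per frame type; key = subscriber cookie, value = its callback.
static std::map<void*, frame_callback> gFrameSubscribers;

static void enableMsg(void *cookie, frame_callback cb)  { gFrameSubscribers[cookie] = cb; }
static void disableMsg(void *cookie)                    { gFrameSubscribers.erase(cookie); }

// Dispatch: walk the registry and invoke every subscriber, the way sendFrameToSubscribers does.
static void sendFrame(CameraFrame *frame) {
    for (auto &entry : gFrameSubscribers)
        entry.second(frame);
}

static void previewCallback(CameraFrame *frame) {
    std::printf("got preview frame, buffer=%p\n", frame->buffer);
}

int main() {
    int cookie = 0;                       // the display adapter instance plays the cookie role
    enableMsg(&cookie, previewCallback);  // what enableMsgType does for PREVIEW_FRAME_SYNC
    CameraFrame f { 1, nullptr };
    sendFrame(&f);                        // what the V4L preview thread triggers per frame
    disableMsg(&cookie);                  // what disableMsgType does
    return 0;
}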

void BaseCameraAdapter::setFrameRefCount(CameraBuffer * frameBuf, CameraFrame::FrameType frameType, int refCount)
{
    switch (frameType)
    {
        case CameraFrame::IMAGE_FRAME:
        case CameraFrame::RAW_FRAME:
                {
                Mutex::Autolock lock(mCaptureBufferLock);
                mCaptureBuffersAvailable.replaceValueFor(frameBuf, refCount);
                }
            break;
        case CameraFrame::SNAPSHOT_FRAME:
                {
                Mutex::Autolock lock(mSnapshotBufferLock);
                mSnapshotBuffersAvailable.replaceValueFor((unsigned int) frameBuf, refCount);
                }
            break;
        case CameraFrame::PREVIEW_FRAME_SYNC:
                {
                Mutex::Autolock lock(mPreviewBufferLock);
                mPreviewBuffersAvailable.replaceValueFor(frameBuf, refCount); // as I understand it, this binds refCount to frameBuf: the camera buffer is the key in mPreviewBuffersAvailable and its reference count is the value
                }
            break;
        case CameraFrame::FRAME_DATA_SYNC:
                {
                Mutex::Autolock lock(mPreviewDataBufferLock);
                mPreviewDataBuffersAvailable.replaceValueFor(frameBuf, refCount);
                }
            break;
        case CameraFrame::VIDEO_FRAME_SYNC:
                {
                Mutex::Autolock lock(mVideoBufferLock);
                mVideoBuffersAvailable.replaceValueFor(frameBuf, refCount);
                }
            break;
        case CameraFrame::REPROCESS_INPUT_FRAME:
                {
                Mutex::Autolock lock(mVideoInBufferLock);
                mVideoInBuffersAvailable.replaceValueFor(frameBuf, refCount);
                }
            break;
        default:
            break;
    };
}

Next, let's look at how sendFrameToSubscribers is implemented.

status_t BaseCameraAdapter::sendFrameToSubscribers(CameraFrame *frame)
{
    status_t ret = NO_ERROR;
    unsigned int mask;

    if (NULL == frame)
    {
        CAMHAL_LOGEA("Invalid CameraFrame");
        return -EINVAL;
    }

    for (mask = 1; mask < CameraFrame::ALL_FRAMES; mask <<= 1) {
      if (mask & frame->mFrameMask) {
        switch (mask) {

        case CameraFrame::IMAGE_FRAME:
          {
            CameraHal::PPM("Shot to Jpeg: ", &mStartCapture);
            ret = __sendFrameToSubscribers(frame, &mImageSubscribers, CameraFrame::IMAGE_FRAME);
          }
          break;
        case CameraFrame::RAW_FRAME:
            ret = __sendFrameToSubscribers(frame, &mRawSubscribers, CameraFrame::RAW_FRAME);
          break;
        case CameraFrame::PREVIEW_FRAME_SYNC:
            ret = __sendFrameToSubscribers(frame, &mFrameSubscribers, CameraFrame::PREVIEW_FRAME_SYNC);
          break;
        case CameraFrame::SNAPSHOT_FRAME:
            ret = __sendFrameToSubscribers(frame, &mSnapshotSubscribers, CameraFrame::SNAPSHOT_FRAME);
          break;
        case CameraFrame::VIDEO_FRAME_SYNC:
            ret = __sendFrameToSubscribers(frame, &mVideoSubscribers, CameraFrame::VIDEO_FRAME_SYNC);
          break;
        case CameraFrame::FRAME_DATA_SYNC:
            ret = __sendFrameToSubscribers(frame, &mFrameDataSubscribers, CameraFrame::FRAME_DATA_SYNC);
          break;
        case CameraFrame::REPROCESS_INPUT_FRAME:
            ret = __sendFrameToSubscribers(frame, &mVideoInSubscribers, CameraFrame::REPROCESS_INPUT_FRAME);
          break;
        default:
          CAMHAL_LOGEB("FrameType not supported 0x%x", mask);
          break;
        }//SWITCH
        frame->mFrameMask &= ~mask;
      }//IF
    }//FOR

 EXIT:

status_t BaseCameraAdapter::__sendFrameToSubscribers(CameraFrame* frame,
                                                     KeyedVector<int, frame_callback> *subscribers,
                                                     CameraFrame::FrameType frameType)
{
    size_t refCount = 0;
    frame_callback callback = NULL;

    frame->mFrameType = frameType;

    if ( (frameType == CameraFrame::PREVIEW_FRAME_SYNC) ||
         (frameType == CameraFrame::VIDEO_FRAME_SYNC) ||
         (frameType == CameraFrame::SNAPSHOT_FRAME) ) {
        if (mFrameQueue.size() > 0) {
          CameraFrame *lframe = (CameraFrame *)mFrameQueue.valueFor(frame->mBuffer);
          frame->mYuv[0] = lframe->mYuv[0];
          frame->mYuv[1] = frame->mYuv[0] + (frame->mLength + frame->mOffset)*2/3;
        }
        else {
          CAMHAL_LOGDA("Empty Frame Queue");
          return -EINVAL;
        }
    }

    if (NULL != subscribers) {
        refCount = getFrameRefCount(frame->mBuffer, frameType); // the ref count tells how many subscriber callbacks are registered for this frame type

        if (refCount == 0) {
            CAMHAL_LOGDA("Invalid ref count of 0");
            return -EINVAL;
        }

        if (refCount > subscribers->size()) {
            CAMHAL_LOGEB("Invalid ref count for frame type: 0x%x", frameType);
        }

        CAMHAL_LOGVB("Type of Frame: 0x%x address: 0x%x refCount start %d",
                     frame->mFrameType,
                     ( uint32_t ) frame->mBuffer,
                     refCount);

        for ( unsigned int i = 0 ; i < refCount; i++ ) {
            frame->mCookie = ( void * ) subscribers->keyAt(i);
            callback = (frame_callback) subscribers->valueAt(i);

            if (!callback) {
                CAMHAL_LOGEB("callback not set for frame type: 0x%x", frameType);
                return -EINVAL;
            }

            callback(frame);
        }
    } else {
        CAMHAL_LOGEA("Subscribers is null??");
    }

We will not analyze the rest for now, but one thing must be made clear: where does callback come from?

Recall that when the DisplayAdapter was instantiated there was this step: 3. mDisplayAdapter->setFrameProvider(mCameraAdapter) // the key step mentioned earlier.

Let's look at the implementation of setFrameProvider:

int ANativeWindowDisplayAdapter::setFrameProvider(FrameNotifier *frameProvider)
{
    // Check for NULL pointer
    if ( !frameProvider ) {
        CAMHAL_LOGEA("NULL passed for frame provider");
        return BAD_VALUE;
    }

    //Release any previous frame providers
    if ( NULL != mFrameProvider ) {
        delete mFrameProvider;
    }

    /** Dont do anything here, Just save the pointer for use when display is
         actually enabled or disabled
    */
    mFrameProvider = new FrameProvider(frameProvider, this, frameCallbackRelay); // instantiate a FrameProvider; one argument is crucial: frameCallbackRelay, whose definition is given below

    return NO_ERROR;
}

void ANativeWindowDisplayAdapter::frameCallbackRelay(CameraFrame* caFrame)
{
    if ( NULL != caFrame )
    {
        if ( NULL != caFrame->mCookie )
        {
            ANativeWindowDisplayAdapter *da = (ANativeWindowDisplayAdapter*) caFrame->mCookie;
            da->frameCallback(caFrame);
        }
        else
        {
            CAMHAL_LOGEB("Invalid Cookie in Camera Frame = %p, Cookie = %p", caFrame, caFrame->mCookie);
        }
    }
    else
    {
        CAMHAL_LOGEB("Invalid Camera Frame = %p", caFrame);
    }
}

void ANativeWindowDisplayAdapter::frameCallback(CameraFrame* caFrame)
{
    ///Call queueBuffer of overlay in the context of the callback thread
    DisplayFrame df;
    df.mBuffer = caFrame->mBuffer;
    df.mType = (CameraFrame::FrameType) caFrame->mFrameType;
    df.mOffset = caFrame->mOffset;
    df.mWidthStride = caFrame->mAlignment;
    df.mLength = caFrame->mLength;
    df.mWidth = caFrame->mWidth;
    df.mHeight = caFrame->mHeight;
    postFrame(df);
}

The callback is registered here and then waits for frames to arrive. It is well worth looking at the FrameProvider constructor to see how other code ends up invoking this callback.

FrameProvider(FrameNotifier *fn, void* cookie, frame_callback frameCallback)
        : mFrameNotifier(fn), mCookie(cookie), mFrameCallback(frameCallback) { }

This constructor is interesting: it has no body at all, it simply stores the three arguments in three members (the relay pattern is sketched right after this list):

1. mFrameNotifier(fn) // mFrameNotifier is the CameraAdapter

2. mCookie(cookie)

3. mFrameCallback(frameCallback) // mFrameCallback points to the callback we defined above
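
The pattern being set up here is worth spelling out: a static, free-function-style callback (frameCallbackRelay) is registered together with a cookie that is really the object pointer, and the relay casts the cookie back to reach the instance method (frameCallback). A minimal sketch of that pattern, with made-up class names:

#include <cstdio>

struct CameraFrame { void *mCookie; void *mBuffer; };
typedef void (*frame_callback)(CameraFrame *frame);

class DisplayAdapterSketch {
public:
    // Static relay: recover the instance from the cookie and forward the call,
    // in the spirit of ANativeWindowDisplayAdapter::frameCallbackRelay.
    static void frameCallbackRelay(CameraFrame *frame) {
        if (frame && frame->mCookie)
            static_cast<DisplayAdapterSketch*>(frame->mCookie)->frameCallback(frame);
    }
private:
    void frameCallback(CameraFrame *frame) {
        std::printf("posting buffer %p to the display\n", frame->mBuffer);
    }
};

int main() {
    DisplayAdapterSketch adapter;
    CameraFrame frame { &adapter, nullptr };   // the provider stored &adapter as the cookie
    frame_callback cb = &DisplayAdapterSketch::frameCallbackRelay;
    cb(&frame);                                // the frame source only ever sees cb + cookie
    return 0;
}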

Next we need to go back to the cameraPreviewInitialization() method mentioned earlier inside startPreview.

/**
   @brief Set preview mode related initialization
          -> Camera Adapter set params
          -> Allocate buffers
          -> Set use buffers for preview

   @return NO_ERROR
 */
status_t CameraHal::cameraPreviewInitialization()
{
    status_t ret = NO_ERROR;
    CameraAdapter::BuffersDescriptor desc;
    CameraFrame frame;
    unsigned int required_buffer_count;
    unsigned int max_queueble_buffers;

    gettimeofday(&mStartPreview, NULL);

    if (mPreviewInitializationDone) {
        return NO_ERROR;
    }

    if ( mPreviewEnabled ){
      CAMHAL_LOGDA("Preview already running");
      LOG_FUNCTION_NAME_EXIT;
      return ALREADY_EXISTS;
    }

    if ( NULL != mCameraAdapter ) {
      ret = mCameraAdapter->setParameters(mParameters);
    }

    if ((mPreviewStartInProgress == false) && (mDisplayPaused == false)){
      ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_QUERY_RESOLUTION_PREVIEW, ( int ) &frame);
      if ( NO_ERROR != ret ){
        CAMHAL_LOGEB("Error: CAMERA_QUERY_RESOLUTION_PREVIEW %d", ret);
        return ret;
      }

      ///Update the current preview width and height
      mPreviewWidth = frame.mWidth;
      mPreviewHeight = frame.mHeight;
    }

    ///If we don't have the preview callback enabled and display adapter,
    if (!mSetPreviewWindowCalled || (mDisplayAdapter.get() == NULL)){
      CAMHAL_LOGD("Preview not started. Preview in progress flag set");
      mPreviewStartInProgress = true;
      ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_SWITCH_TO_EXECUTING);
      if ( NO_ERROR != ret ){
        CAMHAL_LOGEB("Error: CAMERA_SWITCH_TO_EXECUTING %d", ret);
        return ret;
      }
      return NO_ERROR;
    }

    if ( (mDisplayAdapter.get() != NULL) && ( !mPreviewEnabled ) && ( mDisplayPaused ) )
    {
        CAMHAL_LOGDA("Preview is in paused state");

        mDisplayPaused = false;
        mPreviewEnabled = true;
        if ( NO_ERROR == ret )
        {
            ret = mDisplayAdapter->pauseDisplay(mDisplayPaused);
            if ( NO_ERROR != ret )
            {
                CAMHAL_LOGEB("Display adapter resume failed %x", ret);
            }
        }
        //restart preview callbacks
        if (mMsgEnabled & CAMERA_MSG_PREVIEW_FRAME)
        {
            mAppCallbackNotifier->enableMsgType(CAMERA_MSG_PREVIEW_FRAME);
        }
        signalEndImageCapture();
        return ret;
    }

    required_buffer_count = atoi(mCameraProperties->get(CameraProperties::REQUIRED_PREVIEW_BUFS));

    ///Allocate the preview buffers
    ret = allocPreviewBufs(mPreviewWidth, mPreviewHeight, mParameters.getPreviewFormat(), required_buffer_count, max_queueble_buffers);

    if ( NO_ERROR != ret )
    {
        CAMHAL_LOGEA("Couldn't allocate buffers for Preview");
    }

    if ( mMeasurementEnabled )
    {
        ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_QUERY_BUFFER_SIZE_PREVIEW_DATA,
                                          ( int ) &frame,
                                          required_buffer_count);
        if ( NO_ERROR != ret )
        {
            return ret;
        }

         ///Allocate the preview data buffers
        ret = allocPreviewDataBufs(frame.mLength, required_buffer_count);
        if ( NO_ERROR != ret ) {
            CAMHAL_LOGEA("Couldn't allocate preview data buffers");
        }

        desc.mBuffers = mPreviewDataBuffers;
        desc.mOffsets = mPreviewDataOffsets;
        desc.mFd = mPreviewDataFd;
        desc.mLength = mPreviewDataLength;
        desc.mCount = ( size_t ) required_buffer_count;
        desc.mMaxQueueable = (size_t) required_buffer_count;

        mCameraAdapter->sendCommand(CameraAdapter::CAMERA_USE_BUFFERS_PREVIEW_DATA,
                                    ( int ) &desc);
    }

    ///Pass the buffers to Camera Adapter
    desc.mBuffers = mPreviewBuffers;
    desc.mOffsets = mPreviewOffsets;
    desc.mFd = mPreviewFd;
    desc.mLength = mPreviewLength;
    desc.mCount = ( size_t ) required_buffer_count;
    desc.mMaxQueueable = (size_t) max_queueble_buffers;

    ret = mCameraAdapter->sendCommand(CameraAdapter::CAMERA_USE_BUFFERS_PREVIEW,
                                      ( int ) &desc);
    if ( NO_ERROR != ret )
    {
        CAMHAL_LOGEB("Failed to register preview buffers: 0x%x", ret);
    }

    mAppCallbackNotifier->startPreviewCallbacks(mParameters, mPreviewBuffers, mPreviewOffsets, mPreviewFd, mPreviewLength, required_buffer_count);

    ///Start the callback notifier
    ret = mAppCallbackNotifier->start();

    if ( ALREADY_EXISTS == ret )
    {
        //Already running, do nothing
        CAMHAL_LOGDA("AppCallbackNotifier already running");
        ret = NO_ERROR;
    }
    else if ( NO_ERROR == ret ) {
        CAMHAL_LOGDA("Started AppCallbackNotifier..");
        mAppCallbackNotifier->setMeasurements(mMeasurementEnabled);
    }
    else
    {
        CAMHAL_LOGDA("Couldn't start AppCallbackNotifier");
    }

    if (ret == NO_ERROR) mPreviewInitializationDone = true;

Let's look at the implementation of the call mAppCallbackNotifier->enableMsgType(CAMERA_MSG_PREVIEW_FRAME):

status_t AppCallbackNotifier::enableMsgType(int32_t msgType)
{
    if ( msgType & CAMERA_MSG_PREVIEW_FRAME ) {
        mFrameProvider->enableFrameNotification(CameraFrame::PREVIEW_FRAME_SYNC);
    }

    if ( msgType & CAMERA_MSG_POSTVIEW_FRAME ) {
        mFrameProvider->enableFrameNotification(CameraFrame::SNAPSHOT_FRAME);
    }

    if ( msgType & CAMERA_MSG_RAW_IMAGE ) {
        mFrameProvider->enableFrameNotification(CameraFrame::RAW_FRAME);
    }

int FrameProvider::enableFrameNotification(int32_t frameTypes)
{
    ///Enable the frame notification to CameraAdapter (which implements FrameNotifier interface)
    mFrameNotifier->enableMsgType(frameTypes << MessageNotifier::FRAME_BIT_FIELD_POSITION,
                                  mFrameCallback,
                                  NULL,
                                  mCookie);

This enableMsgType is exactly the enableMsgType mentioned earlier, the one that adds the callback under the corresponding key.

mFrameNotifier here is a FrameNotifier object. FrameNotifier inherits from MessageNotifier, while BaseCameraAdapter inherits from CameraAdapter and CameraAdapter in turn inherits from FrameNotifier. So the enableMsgType called through mFrameNotifier is a virtual function, and what actually runs is the enableMsgType defined in BaseCameraAdapter. Let's look at its implementation:

void BaseCameraAdapter::enableMsgType(int32_t msgs, frame_callback callback, event_callback eventCb, void* cookie)
{
    Mutex::Autolock lock(mSubscriberLock);

    int32_t frameMsg = ((msgs >> MessageNotifier::FRAME_BIT_FIELD_POSITION) & EVENT_MASK);
    int32_t eventMsg = ((msgs >> MessageNotifier::EVENT_BIT_FIELD_POSITION) & EVENT_MASK);

    if ( frameMsg != 0 )
    {
        CAMHAL_LOGVB("Frame message type id=0x%x subscription request", frameMsg);
        switch ( frameMsg )
        {
            case CameraFrame::PREVIEW_FRAME_SYNC:
                mFrameSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::FRAME_DATA_SYNC:
                mFrameDataSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::SNAPSHOT_FRAME:
                mSnapshotSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::IMAGE_FRAME:
                mImageSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::RAW_FRAME:
                mRawSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::VIDEO_FRAME_SYNC:
                mVideoSubscribers.add((int) cookie, callback);
                break;
            case CameraFrame::REPROCESS_INPUT_FRAME:
                mVideoInSubscribers.add((int) cookie, callback);
                break;
            default:
                CAMHAL_LOGEA("Frame message type id=0x%x subscription no supported yet!", frameMsg);
                break;
        }
    }

    if ( eventMsg != 0 )
    {
        CAMHAL_LOGVB("Event message type id=0x%x subscription request", eventMsg);
        if ( CameraHalEvent::ALL_EVENTS == eventMsg )
        {
            mFocusSubscribers.add((int) cookie, eventCb);
            mShutterSubscribers.add((int) cookie, eventCb);
            mZoomSubscribers.add((int) cookie, eventCb);
            mMetadataSubscribers.add((int) cookie, eventCb);
        }
        else
        {
            CAMHAL_LOGEA("Event message type id=0x%x subscription no supported yet!", eventMsg);
        }
    }

Here mFrameSubscribers.add((int) cookie, callback) is what associates the mFrameCallback callback with its key, which is why

    callback = (frame_callback) subscribers->valueAt(i);

can retrieve it later. With the association in place, the frame data flows on to be displayed along the path analyzed above:

    postFrame(df); // fill the DisplayFrame structure and call postFrame to display it

postFrame is now the main thing to study: once the data has been packed into a DisplayFrame, how does it actually get displayed?

status_t ANativeWindowDisplayAdapter::postFrame(ANativeWindowDisplayAdapter::DisplayFrame &dispFrame)
{
    status_t ret = NO_ERROR;
    uint32_t actualFramesWithDisplay = 0;
    android_native_buffer_t *buffer = NULL;
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    int i;

    ///@todo Do cropping based on the stabilized frame coordinates
    ///@todo Insert logic to drop frames here based on refresh rate of
    ///display or rendering rate whichever is lower
    ///Queue the buffer to overlay

    if ( NULL == mANativeWindow ) {
        return NO_INIT;
    }

    if (!mBuffers || !dispFrame.mBuffer) {
        CAMHAL_LOGEA("NULL sent to postFrame");
    }

    for ( i = 0; i < mBufferCount; i++ )
    {
        if ( dispFrame.mBuffer == &mBuffers[i] )
        {
            break;
        }
    }

    mFramesType.add( (int)mBuffers[i].opaque, dispFrame.mType );

    if ( mDisplayState == ANativeWindowDisplayAdapter::DISPLAY_STARTED &&
                (!mPaused || CameraFrame::CameraFrame::SNAPSHOT_FRAME == dispFrame.mType) &&
                !mSuspend)
    {
        Mutex::Autolock lock(mLock);
        uint32_t xOff = (dispFrame.mOffset % PAGE_SIZE);
        uint32_t yOff = (dispFrame.mOffset / PAGE_SIZE);

        // Set crop only if current x and y offsets do not match with frame offsets
        if ((mXOff != xOff) || (mYOff != yOff))
        {
            CAMHAL_LOGDB("Offset %d xOff = %d, yOff = %d", dispFrame.mOffset, xOff, yOff);
            uint8_t bytesPerPixel;
            ///Calculate bytes per pixel based on the pixel format
            if (strcmp(mPixelFormat, (const char *) CameraParameters::PIXEL_FORMAT_YUV422I) == 0)
            {
                bytesPerPixel = 2;
            }
            else if (strcmp(mPixelFormat, (const char *) CameraParameters::PIXEL_FORMAT_RGB565) == 0)
            {
                bytesPerPixel = 2;
            }
            else if (strcmp(mPixelFormat, (const char *) CameraParameters::PIXEL_FORMAT_YUV420SP) == 0)
            {
                bytesPerPixel = 1;
            }

            CAMHAL_LOGVB(" crop.left = %d crop.top = %d crop.right = %d crop.bottom = %d",
                          xOff/bytesPerPixel, yOff, (xOff/bytesPerPixel)+mPreviewWidth, yOff+mPreviewHeight);
            // We'll ignore any errors here, if the surface is
            // already invalid, we'll know soon enough.
            mANativeWindow->set_crop(mANativeWindow, xOff/bytesPerPixel, yOff,
                                     (xOff/bytesPerPixel)+mPreviewWidth, yOff+mPreviewHeight);

            ///Update the current x and y offsets
            mXOff = xOff;
            mYOff = yOff;
        }

        buffer_handle_t *handle = (buffer_handle_t *) mBuffers[i].opaque;
        // unlock buffer before sending to display
        mapper.unlock(*handle);
        ret = mANativeWindow->enqueue_buffer(mANativeWindow, handle);
        if ( NO_ERROR != ret ) {
            CAMHAL_LOGE("Surface::queueBuffer returned error %d", ret);
        }

        mFramesWithCameraAdapterMap.removeItem((buffer_handle_t *) dispFrame.mBuffer->opaque);

        // HWComposer has not minimum buffer requirement. We should be able to dequeue
        // the buffer immediately
        TIUTILS::Message msg;
        mDisplayQ.put(&msg);

        if ( mMeasureStandby )
        {
            CameraHal::PPM("Standby to first shot: Sensor change completed - ", &mStandbyToShot);
            mMeasureStandby = false;
        }
        else if (CameraFrame::CameraFrame::SNAPSHOT_FRAME == dispFrame.mType)
        {
            CameraHal::PPM("Shot to snapshot: ", &mStartCapture);
            mShotToShot = true;
        }
        else if ( mShotToShot )
        {
            CameraHal::PPM("Shot to shot: ", &mStartCapture);
            mShotToShot = false;
        }
    }
    else
    {
        buffer_handle_t *handle = (buffer_handle_t *) mBuffers[i].opaque;

        // unlock buffer before giving it up
        mapper.unlock(*handle);

        // cancel buffer and dequeue another one
        ret = mANativeWindow->cancel_buffer(mANativeWindow, handle);
        if ( NO_ERROR != ret ) {
            CAMHAL_LOGE("Surface::cancelBuffer returned error %d", ret);
        }
    }

The display side of this is fairly involved; it will take some more time to work through.
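
To get a feel for what postFrame ultimately does with the window, here is a minimal sketch of the per-frame cycle against a preview_stream_ops-style function table. The struct below is a cut-down stand-in defined locally for the example (the real preview_stream_ops in the camera HAL header has many more entries such as set_crop and set_buffers_geometry), so treat the exact fields as assumptions:

#include <cstdio>

// Cut-down stand-in for the window function table the HAL talks to.
struct preview_ops {
    int (*dequeue_buffer)(preview_ops *w, void **buf);
    int (*enqueue_buffer)(preview_ops *w, void *buf);
    int (*cancel_buffer) (preview_ops *w, void *buf);
};

// Toy implementation that just logs; a real window hands out gralloc buffers.
static char gBuffer[64];
static int dq(preview_ops*, void **buf) { *buf = gBuffer; return 0; }
static int eq(preview_ops*, void *buf)  { std::printf("displayed %p\n", buf); return 0; }
static int cx(preview_ops*, void *buf)  { std::printf("cancelled %p\n", buf); return 0; }

// One iteration of the posting cycle the display adapter performs per frame.
static int postOneFrame(preview_ops *win, bool frameOk) {
    void *buf = nullptr;
    if (win->dequeue_buffer(win, &buf) != 0) return -1;  // get an empty buffer from the window
    // ... the camera side fills 'buf' with the converted NV12 frame here ...
    return frameOk ? win->enqueue_buffer(win, buf)       // hand it to the compositor for display
                   : win->cancel_buffer(win, buf);       // or give it back untouched
}

int main() {
    preview_ops win { dq, eq, cx };
    postOneFrame(&win, true);
    postOneFrame(&win, false);
    return 0;
}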
