
Continuous off-screen rendering / screen capture with OpenSceneGraph, without a windowing system

I am currently using an off-screen renderer so that I can perform mutual-information registration against real scenes. I use OpenSceneGraph to handle the large data and the automatic loading. My problem is grabbing the frame buffer in a sequential, single-threaded program.

Well, I have this class (headers):

#include <osg/ref_ptr>
#include <osg/Array>
#include <osg/ImageUtils>
#include <osgGA/StateSetManipulator>
#include <osgViewer/Viewer>
#include <osg/GraphicsContext>
#include <osg/Texture2D>
#include <osg/FrameBufferObject>
#include <osgDB/WriteFile>
#include <osg/Referenced>
#include <osg/Vec3>
#include <osg/Image>
#include <osg/State>
#include <string>
#include <chrono>
#include <thread>
#include <assert.h>

#include "ImagingPrimitives.h"

class BoundRenderScene {
public:
    BoundRenderScene();
    virtual ~BoundRenderScene();
    void NextFrame(void);
    inline OpenThreads::Mutex* GetMutexObject(void) { return &_mutex; }

    inline osg::Image* GetFrame(void)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
        return _frame.get();
    }

    inline void GetFrame(osg::Image* img)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);

        if(_frame.valid() && (img!=NULL) && img->valid())
        {
            glReadBuffer(GL_BACK);
            img->readPixels(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY(), GL_RGB,GL_UNSIGNED_BYTE);
            uint w = img->s(), h = img->t(), d = img->r(), c = uint(img->getPixelSizeInBits()/8);
            /*
             * bare testing write op
             * osgDB::writeImageFile(const_cast<const osg::Image&>(*img), "/tmp/testimg.png");
             */
        }
    }

    inline void SetCameraConfiguration(CameraConfiguration* configuration) { _camera_configuration = configuration; }
    inline void SetCameraMatrix(osg::Matrixd camera_matrix) { _camera_matrix = camera_matrix; }
    inline void SetScene(osg::Node* scene) { _scene = scene; }

    inline void Initialize(void) {
        if(!_initialized)
            _init();
        else
            _re_init();
    }

protected:
    osgViewer::Viewer _viewer;
    osg::Matrixd _camera_matrix;
    osg::ref_ptr<osg::Texture2D> _tex;
    osg::ref_ptr<osg::FrameBufferObject> _fbo;
    mutable osg::ref_ptr<osg::Image> _frame;
    osg::ref_ptr<osg::Node> _scene;
    osg::ref_ptr<osg::GraphicsContext::Traits> _traits;
    osg::ref_ptr<osg::GraphicsContext> _gc;
    CameraConfiguration* _camera_configuration;
    SnapshotCallback* cb;
    std::string _filepath;

private:
    void _init(void);
    void _re_init(void);
    bool _initialized;
    mutable OpenThreads::Mutex  _mutex;

    osg::Matrixd pre_transform;
    osg::Matrixd transformation;
};

Furthermore, since many examples of off-screen rendering and screen capture use a Post-/FinalDrawCallback, I copied the callback structure from the "osgdistortion" example, but added a mutex for synchronization:

struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
    inline SnapshotCallback(OpenThreads::Mutex* mtx_obj, std::string filepath, int width, int height) : _filepath(filepath), _output_to_file(false), _mutex(mtx_obj)
    {
        _image = new osg::Image();
        _image->allocateImage(width, height, 1, GL_RGB, GL_UNSIGNED_BYTE);
        if(filepath!="")
            _output_to_file = true;

    }

    inline virtual void operator() (osg::RenderInfo& renderInfo) const
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
        osg::Camera* camera = renderInfo.getCurrentCamera();
        osg::Viewport* viewport = camera ? camera->getViewport() : 0;
        if(viewport && _image.valid())
        {
            glReadBuffer(GL_BACK);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*_image, _filepath);
            }
        }
    }

    inline virtual void operator() (const osg::Camera& camera) const
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
        osg::Viewport* viewport = camera.getViewport();
        if(viewport && _image.valid())
        {
            glReadBuffer(GL_BACK);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*_image, _filepath);
            }
        }
    }

    std::string _filepath;
    bool _output_to_file;

    mutable OpenThreads::Mutex*  _mutex;
    mutable osg::ref_ptr<osg::Image> _image;
};

I initialize and render the scene as follows:

#include "BoundRenderScene.h"

void BoundRenderScene::_init(void)
{
    if(_camera!=NULL)
        _viewer.setDone(true);

    _traits->x = 0;
    _traits->y = 0;
    _traits->width = _camera_configuration->GetSX();
    _traits->height = _camera_configuration->GetSY();
    _traits->red = 8;
    _traits->green = 8;
    _traits->blue = 8;
    _traits->alpha = 0;
    _traits->depth = 24;
    _traits->windowDecoration = false;
    _traits->pbuffer = true;
    _traits->doubleBuffer = true;
    _traits->sharedContext = 0x0;


    if(_gc.get()!=NULL)
    {
        bool release_success = _gc->releaseContext();
        if(!release_success)
            std::cerr << "Error releasing Graphics Context.";
    }
    _gc = osg::GraphicsContext::createGraphicsContext(_traits.get());
    _viewer.getCamera()->setGraphicsContext(_gc.get());

    _viewer.setThreadingModel(osgViewer::Viewer::SingleThreaded);
    _viewer.setUpThreading();
    _viewer.realize();


    _frame->allocateImage(_camera_configuration->GetSX(), _camera_configuration->GetSY(), 1, GL_RGB, GL_UNSIGNED_BYTE);

    _viewer.getCamera()->getOrCreateStateSet();
    _viewer.getCamera()->setRenderTargetImplementation(osg::Camera::PIXEL_BUFFER);

    cb = new SnapshotCallback(&_mutex,_filepath, _camera_configuration->GetSX(), _camera_configuration->GetSY());

    //_viewer.getCamera()->setPostDrawCallback( cb );

    //Clear colour represents "no information" => background elimination in the natural image.
    _viewer.getCamera()->setClearColor(osg::Vec4f(0.25f, 0.25f, 0.25f, 1.0f));
    _viewer.getCamera()->setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
    _viewer.getCamera()->setDrawBuffer(GL_BACK);
    _viewer.getCamera()->setReadBuffer(GL_BACK);
    _viewer.getCamera()->setViewport(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY());
    _viewer.getCamera()->setProjectionMatrix(osg::Matrixd::perspective(osg::RadiansToDegrees(_camera_configuration->GetFoV()), _camera_configuration->GetAspectRatio(), 0.1, 150.0));
    //looking in geo-coord system
    _viewer.getCamera()->setViewMatrix(osg::Matrixd::lookAt(osg::Vec3d(0.0, 0.0, -1.0), osg::Vec3d(0.0, 0.0, 1.0), osg::Vec3d(0.0, 1.0, 0.0)));
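    // Attaching an osg::Image to the camera's colour buffer makes OSG read the
    // rendered frame back into _frame after each draw traversal.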
    _viewer.getCamera()->attach(osg::Camera::COLOR_BUFFER, _frame.get());

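    // Note: FRAME_BUFFER_OBJECT overrides the PIXEL_BUFFER render target set
    // further up; only the FBO implementation takes effect.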
    _viewer.getCamera()->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
    _tex->setTextureSize(_camera_configuration->GetSX(), _camera_configuration->GetSY());
    _tex->setInternalFormat(GL_RGB);
    _tex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
    _tex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
    _tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
    _tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
    _tex->setResizeNonPowerOfTwoHint(false);
    _tex->setImage(0,_frame.get());

    _fbo->setAttachment(osg::Camera::COLOR_BUFFER, osg::FrameBufferAttachment(_tex.get()));
    _viewer.setDone(false);
    _viewer.setSceneData(_scene.get());
    _viewer.setCameraManipulator(0x0);
}

void BoundRenderScene::NextFrame(void)
{
    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
    if(_frame.valid() && !_viewer.done())
    {
        osg::Matrixd inverse_cam = osg::Matrixd::inverse(_camera_matrix);
        transformation = inverse_cam * pre_transform;

        _viewer.getCamera()->setViewMatrix(transformation);
        _viewer.updateTraversal();
        _viewer.frame();
    }
    else
        std::cout << "Viewer or Camera invalid." << std::endl;
}

The main workflow looks like this (simplified):

BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
 * setting initial parameters
 * fill poses with camera positions to render, for registration
 */
renderer.Initialize();
for(uint i = 0; i < poses.size(); i++)
{
    renderer.SetCameraMatrix(poses.at(i));
    renderer.NextFrame();
    std::this_thread::sleep_for(std::chrono::milliseconds(40)); // cap at the 25 fps frame limit
    osg::Image* reg_image = renderer.GetFrame();
    /*
     * Do further processing
     */
}

Now to the crux: the OpenSceneGraph example "osgprerender" (included with OSG) uses an osg::Camera::DrawCallback, just like my SnapshotCallback, for off-screen rendering. Unfortunately, in my case the operator() function is never called for my scene graph, so this way of capturing the screen does not work for me. It is also rather inconvenient, since the rest of the mutual-information program is a fairly sequential pipeline.
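For completeness: such a draw callback only fires if it is actually installed on the camera, and in the _init() above the setPostDrawCallback call is commented out. A minimal sketch of how the member cb would typically be hooked up (for illustration only):

// Install the snapshot callback so its operator() runs once per rendered frame.
// setFinalDrawCallback() fires after all rendering for the camera has finished;
// setPostDrawCallback() would fire right after the draw dispatch instead.
_viewer.getCamera()->setFinalDrawCallback(cb);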

Other wrappers (https://github.com/xarray/osgRecipes/blob/master/integrations/osgberkelium/osgberkelium.cpp) use an approach similar to my "void GetFrame(osg::Image* img)" method, where the image is read back actively via "readPixels". That would be very convenient for my workflow, but the method always returns a blank image, so it does not do the trick either.
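One thing worth checking here (an assumption on my part, not verified): with a SingleThreaded viewer the GL context may no longer be current once frame() has returned, so a readPixels issued outside the draw callbacks has no valid context to read from. A minimal sketch of GetFrame with the context made current first:

inline void BoundRenderScene::GetFrame(osg::Image* img)
{
    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
    if(!_frame.valid() || img==NULL || !img->valid())
        return;

    // Assumption: the viewer may have released the pbuffer context at the end of
    // frame(), so make it current on this thread before issuing any GL read.
    bool made_current = false;
    if(_gc.valid() && !_gc->isCurrent())
        made_current = _gc->makeCurrent();

    glReadBuffer(GL_BACK);
    img->readPixels(0, 0, _camera_configuration->GetSX(),
                    _camera_configuration->GetSY(), GL_RGB, GL_UNSIGNED_BYTE);

    if(made_current)
        _gc->releaseContext(); // hand the context back to the viewer
}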

What does work is "osg::Image* GetFrame(void)", which returns the image bound/attached to the FBO. It is similar to the "osgdistortion" example. It works for rendering one or two images, but after a while rendering and processing lose synchronization and the application crashes as follows:

[---FIRST FRAME---]
GraphicsCostEstimator::calibrate(..)
cull_draw() 0x1998ca0
ShaderComposer::~ShaderComposer() 0x35a4d40
Renderer::compile()
OpenGL extension 'GL_ARB_vertex_buffer_object' is supported.
OpenGL extension 'GL_EXT_secondary_color' is supported.
OpenGL extension 'GL_EXT_fog_coord' is supported.
OpenGL extension '' is not supported.
OpenGL extension 'GL_EXT_packed_depth_stencil' is supported.
Setting up osg::Camera::FRAME_BUFFER_OBJECT
end cull_draw() 0x1998ca0
[processing]
[   SECOND FRAME   ]
cull_draw() 0x1998ca0
OpenGL extension 'GL_ARB_fragment_program' is supported.
OpenGL extension 'GL_ARB_vertex_program' is supported.
OpenGL extension 'GL_ARB_shader_objects' is supported.
OpenGL extension 'GL_ARB_vertex_shader' is supported.
OpenGL extension 'GL_ARB_fragment_shader' is supported.
OpenGL extension 'GL_ARB_shading_language_100' is supported.
OpenGL extension 'GL_EXT_geometry_shader4' is supported.
OpenGL extension 'GL_EXT_gpu_shader4' is supported.
OpenGL extension 'GL_ARB_tessellation_shader' is supported.
OpenGL extension 'GL_ARB_uniform_buffer_object' is supported.
OpenGL extension 'GL_ARB_get_program_binary' is supported.
OpenGL extension 'GL_ARB_gpu_shader_fp64' is supported.
OpenGL extension 'GL_ARB_shader_atomic_counters' is supported.
glVersion=4.5, isGlslSupported=YES, glslLanguageVersion=4.5
Warning: detected OpenGL error 'invalid operation' at end of SceneView::draw()
end cull_draw() 0x1998ca0

[-FROM 3rd FRAME ONWARDS-]
[workload, matrix setup]
[_viewer.frame()]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[next frame]

[BREAKING]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[more work]
Segmentation fault (core dumped)

So, the questions are:

  • I have looked through the source files of the viewer-related classes in OSG, but I could not pin down where the error

    Warning: detected OpenGL error 'invalid operation' at start of State::apply()

    comes from. Any idea where to start looking for it?

  • Which approach is best to use in OSG for sequential rendering and screen capture?

  • How do I get hold of the mutex of a plain osg::Viewer, so that I can synchronize the renderer with the rest of my pipeline? (The renderer is single-threaded.)

  • Any other advice from experience with off-screen rendering and screen capture in OpenSceneGraph?

1 Answer


    Posting this as the result of digging deeper: releasing the graphics context in the class destructor did free the OpenGL pipeline, but it also unbound the textures from the state sets of the loaded scene/model, even though the model itself was not discarded (as stated in the question: it is reused in the following passes). Hence, in the subsequent render passes the rendering pipeline tried to access OSG assets that had already been freed by releasing the GL context.

    In code, this changed:

    BoundRenderScene::~BoundRenderScene() {
        // TODO Auto-generated destructor stub
        _viewer.setDone(true);
        _viewer.setReleaseContextAtEndOfFrameHint(true);
        _gc->releaseContext();
    
    #ifdef DEBUG
        std::cout << "BoundRenderScene deleted." << std::endl;
    #endif
    }
    

    to:

    BoundRenderScene::~BoundRenderScene() {
        // TODO Auto-generated destructor stub
        _viewer.setDone(true);
        _viewer.setReleaseContextAtEndOfFrameHint(true);
    
    #ifdef DEBUG
        std::cout << "BoundRenderScene deleted." << std::endl;
    #endif
    }
    

    That fixed the OpenSceneGraph-internal error messages. Now, to solve the frame-capture problem itself, I implemented the callback from the osgprerender example:

    struct SnapshotCallback : public osg::Camera::DrawCallback
    {
    public:
        inline SnapshotCallback(std::string filepath) : _filepath(filepath), _output_to_file(false), _image(NULL)
        {
            if(filepath!="")
                _output_to_file = true;
            _image = new osg::Image();
        }
    
        inline virtual void operator() (osg::RenderInfo& renderInfo) const
        {
            osg::Camera* camera = renderInfo.getCurrentCamera();
            osg::Viewport* viewport = camera ? camera->getViewport() : 0;
            if(viewport)
            {
                glReadBuffer(camera->getDrawBuffer());
                _image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
                _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
                if(_output_to_file)
                {
                    osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
                }
            }
        }
    
        inline virtual void operator() (const osg::Camera& camera) const
        {
            osg::Viewport* viewport = camera.getViewport();
            if(viewport)
            {
                glReadBuffer(camera.getDrawBuffer());
                _image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
                _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
                if(_output_to_file)
                {
                    osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
                }
            }
        }
    
        inline osg::Image* GetImage(void)
        {
            return reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL));
        }
    
    protected:
        std::string _filepath;
        bool _output_to_file;
        mutable osg::ref_ptr<osg::Image> _image;
    };
    

    Now, using a cloned buffer instead of the actual image buffer (an idea taken from the osgscreencapture example), I do get proper images without memory errors.

    For double-buffered rendering, though, I have to render the scene twice in a row so that the correct buffer contains the image of the object, but that is currently not an issue for my use case (the rendering is I/O-bound, not operation-bound).
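    Not shown above is how renderer.GetImage() in the loop below gets at the captured frame; a minimal sketch of how this can be wired up, assuming the callback is kept in the member cb and installed as the camera's final draw callback (names are illustrative):

    // Hypothetical wiring inside BoundRenderScene::_init():
    cb = new SnapshotCallback("");                   // empty path => capture in memory only
    _viewer.getCamera()->setFinalDrawCallback(cb);   // runs after each completed draw

    // Hypothetical accessor behind renderer.GetImage():
    osg::Image* BoundRenderScene::GetImage(void)
    {
        // The callback already hands out a deep copy, so the caller never touches
        // the buffer that the next frame will overwrite.
        return cb->GetImage();
    }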

    So the main function looks like this:

    BoundRenderScene renderer;
    std::vector<osg::Matrixd> poses;
    /*
     * setting initial parameters
     * fill poses with camera positions to render, for registration
     */
    renderer._init();
    for(uint i = 0; i < poses.size(); i++)
    {
        renderer.SetCameraMatrix(poses.at(i));
        renderer.NextImage();
        renderer.NextImage();
        osg::Image* reg_image = renderer.GetImage();
        /*
         * Do further processing
         */
    }
    
