
Is there a command reference for Daniel Shiffman's OpenKinect library?


I am trying to update a script that Luis Calçada made in 2010. He modified Daniel Shiffman's point cloud example so that it collects the point cloud data into text files. Here is the tutorial: http://moullinex.tumblr.com/post/3180520798/catalina-music-video

The problem is that all the commands seem to be outdated. For example, changing kinect.enableDepth(true); to kinect2.initDepth(true) returns the error "not applicable for arguments (boolean)", and if I remove it instead, the next line returns an error saying "the function processDepthImage(boolean) does not exist", and I cannot find equivalent commands in the example files.

Is there some kind of reference where I can see all the possible commands (or, even better, their equivalents from the old library), so that I can update the commands throughout the script to use the new OpenKinect library? If not, is this even a feasible task for me, a Processing novice, to complete?

http://pastebin.com/7GNZAdz7

1 Answer


    Here are a few things that should be of use:

    • Read the available documentation and tutorials

    • If that isn't enough and javadocs/reference aren't available, check the public methods in the source code and read the comments above them

    • In Processing 3 you can use autocomplete to see the methods and properties available throughout the code (including libraries). You can even enable Ctrl+Space autocomplete if you set it in the Preferences

    (screenshot: Processing 3 preferences)

    This may reveal why initDepth(true) is "not applicable for arguments (boolean)": using Processing 3's completion you can see there is indeed an initDepth() function, but it takes no arguments (so drop the true).

    (screenshot: Kinect2 Processing wrapper auto-complete)

    Using the same approach you can easily spot that some functions from the Kinect (v1) class are missing from the Kinect2 class (such as processDepthImage() or getDepthFPS()) and should simply be removed, while others offer the same functionality under a different name/signature (for example quit() in Kinect (v1) becomes stopDevice() followed by dispose() in Kinect2).
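
    For reference, here is a minimal, untested sketch that isolates just those two changes; the same calls appear in the full listing below:

    import org.openkinect.processing.*;

    Kinect2 kinect2;

    void setup() {
      size(800, 600, P3D);
      kinect2 = new Kinect2(this);
      kinect2.initDepth();  // v1's kinect.enableDepth(true) -> initDepth() with no arguments
      kinect2.initDevice(); // start the device once the init*() calls are done
      // v1's kinect.processDepthImage(false) has no Kinect2 equivalent - simply remove it
    }

    void draw() {
      background(0); // nothing to draw here - this sketch only demonstrates the changed calls
    }

    // Shutting down: v1's single kinect.quit() call maps to two calls in Kinect2:
    // kinect2.stopDevice(); followed by kinect2.dispose(); (see the stop() handler in the full listing below)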

    Here is a roughly refactored version of the code that should compile, but since I don't have a Kinect 2 it may not work 100%:

    import org.openkinect.freenect.*;
    import org.openkinect.freenect2.*;
    import org.openkinect.processing.*;
    import org.openkinect.tests.*;
    
    // Daniel Shiffman
    // Kinect Point Cloud example
    // http://www.shiffman.net
    // https://github.com/shiffman/libfreenect/tree/master/wrappers/java/processing
    
    import java.util.*;
    import java.io.*;
    
    // Kinect Library object
    Kinect2 kinect2;
    
    float a = 0;
    
    // Size of kinect image
    int w = 640;
    int h = 480;
    
    // writing state indicator
    boolean write = false;
    
    // threshold filter initial value
    int fltValue = 950;
    
    
    // "recording" object. each vector element holds a coordinate map vector
    Vector <Object> recording = new Vector<Object>(); 
    
    
    // We'll use a lookup table so that we don't have to repeat the math over and over
    float[] depthLookUp = new float[2048];
    
    void setup() {
      size(800, 600, P3D);
      kinect2 = new Kinect2(this);
      kinect2.init();
      kinect2.initDevice();
      kinect2.initDepth();
      // We don't need the grayscale image in this example
      // so this makes it more efficient
      //kinect2.processDepthImage(false);
    
    
      // Lookup table for all possible depth values (0 - 2047)
      for (int i = 0; i < depthLookUp.length; i++) {
        depthLookUp[i] = rawDepthToMeters(i);
      }
    }
    
    void draw() {
    
      background(0);
      fill(255);
      //textMode(SCREEN); // textMode(SCREEN) was removed in Processing 2.0+, so the call is disabled here
      text("Processing FR: " + (int)frameRate, 10, 16);
    
      // Get the raw depth as array of integers
      int[] depth = kinect2.getRawDepth();
    
      // We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
      int skip = 4;
    
      // Translate and rotate
      translate(width/2, height/2, -50);
      rotateY(a);
    
      //noStroke();
      //lights();
    
    
      int index = 0;
    
    
      PVector[] frame = new PVector[19200];
    
    
      for (int x=0; x<w; x+=skip) {
        for (int y=0; y<h; y+=skip) {
          int offset = x+y*w;
    
          // Convert kinect data to world xyz coordinate
          int rawDepth = depth[offset];
    
          boolean flt = true;
          PVector v = depthToWorld(x, y, rawDepth);
          if (flt && rawDepth > fltValue)
          {
            v = depthToWorld(x, y, 2047);
          }
    
          frame[index] = v;
    
          index++;   
    
          stroke(map(rawDepth, 0, 2048, 0, 256));
          pushMatrix();
          // Scale up by 200
          float factor = 400;
          translate(v.x*factor, v.y*factor, factor-v.z*factor);
          //sphere(1);
          point(0, 0);
    
          //line (0,0,1,1);
          popMatrix();
        }
      }
    
      if (write == true) {
        recording.add(frame);
      }
    
    
      // Rotate
      //a += 0.015f;
    }
    
    // These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
    float rawDepthToMeters(int depthValue) {
      if (depthValue < 2047) {
        return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
      }
      return 0.0f;
    }
    
    PVector depthToWorld(int x, int y, int depthValue) {
    
      final double fx_d = 1.0 / 5.9421434211923247e+02;
      final double fy_d = 1.0 / 5.9104053696870778e+02;
      final double cx_d = 3.3930780975300314e+02;
      final double cy_d = 2.4273913761751615e+02;
    
      PVector result = new PVector();
      double depth =  depthLookUp[depthValue];//rawDepthToMeters(depthValue);
      result.x = (float)((x - cx_d) * depth * fx_d);
      result.y = (float)((y - cy_d) * depth * fy_d);
      result.z = (float)(depth);
      return result;
    }
    
    void stop() {
      kinect2.stopDevice();
      kinect2.dispose();
      super.stop();
    }
    
    
    int currentFile = 0;
    
    void saveFile() {
    }
    
    void keyPressed() { // Press a key to save the data
    
      if (key == '1')
      {
        fltValue += 50;
        println("fltValue: " + fltValue);
      } else if (key == '2')
      {
        fltValue -= 50;
        println("fltValue: " + fltValue);
      } else if (key=='4') {
        if (write == true) {
          write = false;
    
          println( "recorded " + recording.size() + " frames.");
    
          // saveFile();
    
          // save    
    
          Enumeration e = recording.elements();
    
          println("Stopped Recording " + currentFile);
          int i = 0;
          while (e.hasMoreElements()) {
    
            // Create one directory
            boolean success = (new File("out"+currentFile)).mkdir(); 
    
    
            PrintWriter output = createWriter("out"+currentFile+"/frame" + i++ +".txt");
            PVector [] frame = (PVector []) e.nextElement();
    
            for (int j = 0; j < frame.length; j++) {
              output.println(j + ", " + frame[j].x + ", " + frame[j].y + ", " + frame[j].z );
            }
            output.flush(); // Write the remaining data
            output.close();
          }
          currentFile++;
        }
      } else if (key == '3') {
        println("Started Recording "+currentFile);
        recording.clear();
    
        write = true;
      }
    }
    

    Update: Much of the original code is only valid for Kinect v1, especially the parts dealing with the depth data and converting depth values into real-world coordinates.

    However, the DepthPointCloud2 example that ships with the library can easily be repurposed: it just needs to store the points in memory until they are written to disk. The only other addition is depth filtering. The original code only included a far threshold, but in practice having both near and far clipping options may prove useful. Here is a modified example that uses SPACE to toggle recording, the N/n keys to increase/decrease near clipping, and the F/f keys to increase/decrease far clipping:

    // Daniel Shiffman //<>//
    // Thomas Sanchez Lengeling
    // Kinect Point Cloud example
    
    // https://github.com/shiffman/OpenKinect-for-Processing
    // http://shiffman.net/p5/kinect/
    
    import org.openkinect.processing.*;
    import java.nio.FloatBuffer;
    
    // Kinect Library object
    Kinect2 kinect2;
    
    // Angle for rotation
    float a = 3.1;
    
    //depth filtering
    float depthNear = 700;
    float depthFar = 950;
    
    //recording point clouds to disk
    boolean isRecording;
    ArrayList<ArrayList<PVector>> frames = new ArrayList<ArrayList<PVector>>();
    
    void setup() {
    
      // Rendering in P3D
      size(800, 600, P3D);
    
      kinect2 = new Kinect2(this);
      kinect2.initDepth();
      kinect2.initDevice();
    }
    
    
    void draw() {
      background(0);
    
      // Translate and rotate
      pushMatrix();
      translate(width/2, height/2, 50);
      rotateY(a);
    
      // We're just going to calculate and draw every 2nd pixel
      int skip = 2;
    
      // Get the raw depth as array of integers
      int[] depth = kinect2.getRawDepth();
    
      //create a new point cloud frame - a list of points
      ArrayList<PVector> frame = new ArrayList<PVector>(); 
    
      stroke(255);
      beginShape(POINTS);
      for (int x = 0; x < kinect2.depthWidth; x+=skip) {
        for (int y = 0; y < kinect2.depthHeight; y+=skip) {
          int offset = x + y * kinect2.depthWidth;
          float depthValue = depth[offset];
    
          //filter based on depth
          if(depthValue >= depthNear && depthValue <= depthFar){
    
            //calculate the x, y, z camera position based on the depth information
            PVector point = depthToPointCloudPos(x, y, depthValue);

            //add the point to the current frame if it's recording
            if(isRecording) frame.add(point);

            // Draw a point
            vertex(point.x, point.y, point.z);
    
          }
        }
      }
      endShape();
    
      popMatrix();
    
      //add the frame to the recording (list of frames) if recording
      if(isRecording) frames.add(frame);
    
      fill(255, 0, 0);
      text((int)frameRate+"fps\nrecording: " + isRecording + "\ndepthFar: " + depthFar + " depthNear: " + depthNear
           +"\nkeys:\nSPACE - toggle recording\nN/n - increase/decrease near clipping\nF/f - increase/decrease far clipping", 50, 50);
    
      // Rotate
      //a += 0.0015f;
    }
    
    void keyReleased(){
      //toggle recording using space
      if(key == ' ') {
        isRecording = !isRecording;
        //if it was recording, but now it's not, there should be some frames there to save
        if(!isRecording) saveFramesToDisk();
        else             frames.clear();//otherwise, there might have been a recording already, so clear any previous frames
      }
      //modify near/far filters
      int depthFilterAmount = 10;//depth filter increment
      if(key == 'n') depthNear -= depthFilterAmount;
      if(key == 'N') depthNear += depthFilterAmount;
      if(key == 'f') depthFar -= depthFilterAmount;
      if(key == 'F') depthFar += depthFilterAmount;
    }
    
    void saveFramesToDisk(){
      //create a timestamp string
      String folderName = "rec_"+day()+"-"+month()+"-"+year()+"_"+hour()+"-"+minute()+"-"+second()+"-"+millis(); // avoid ':' in folder names (not allowed on Windows)
      //make a folder with that name
      new File(folderName).mkdir();
      //count the number of frames
      int numFrames = frames.size();
      //for each frame
      for(int i = 0 ; i < numFrames; i++){
        //access the list of points
        ArrayList<PVector> frame = frames.get(i);
        //make a new text file for each frame - checkout nf() - really handy for naming files sequentially
        PrintWriter output = createWriter(folderName+"/frame" + nf(i,4) +".txt");
        //for each point in a frame
        for (int j = 0; j < frame.size(); j++) {
          //retrieve the point
          PVector p = frame.get(j);
          //write to file: index, x, y,z + new line character
          output.println(j + ", " + p.x + ", " + p.y + ", " + p.z );
        }
    
        output.flush(); // Write the remaining data
        output.close();
    
      }
    
      println("Wrote " + numFrames + " frames in " + folderName);
    
    }
    
    //calculate the xyz camera position based on the depth data
    PVector depthToPointCloudPos(int x, int y, float depthValue) {
      PVector point = new PVector();
      point.z = (depthValue);// / (1.0f); // Convert from mm to meters
      point.x = (x - CameraParams.cx) * point.z / CameraParams.fx;
      point.y = (y - CameraParams.cy) * point.z / CameraParams.fy;
      return point;
    }
    //camera information based on the Kinect v2 hardware
    static class CameraParams {
      static float cx = 254.878f;
      static float cy = 205.395f;
      static float fx = 365.456f;
      static float fy = 365.456f;
      static float k1 = 0.0905474;
      static float k2 = -0.26819;
      static float k3 = 0.0950862;
      static float p1 = 0.0;
      static float p2 = 0.0;
    }
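
    If you need to check a recording afterwards, here is a rough playback sketch. It assumes the file layout written by saveFramesToDisk() above (one frameNNNN.txt per frame, each line holding "index, x, y, z"), and the folder/file name below is a placeholder you would swap for an actual recording on disk:

    ArrayList<PVector> loadedFrame = new ArrayList<PVector>();

    void setup() {
      size(800, 600, P3D);
      // placeholder path - point this at a frame file actually written by the recorder sketch
      String[] lines = loadStrings("rec_example/frame0000.txt");
      for (String line : lines) {
        String[] parts = split(line, ',');
        if (parts.length < 4) continue;        // skip malformed lines
        float x = float(trim(parts[1]));       // parts[0] is the point index
        float y = float(trim(parts[2]));
        float z = float(trim(parts[3]));
        loadedFrame.add(new PVector(x, y, z));
      }
      println("loaded " + loadedFrame.size() + " points");
    }

    void draw() {
      background(0);
      translate(width/2, height/2, 50);
      rotateY(frameCount * 0.01);
      stroke(255);
      beginShape(POINTS);
      for (PVector p : loadedFrame) {
        vertex(p.x, p.y, p.z);
      }
      endShape();
    }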
    
