ReconstructMe SDK  2.0.819-89134
Real-time 3D reconstruction engine
Sensor

Acquire data from real-world sensors or from file streams. More...

Typedefs

typedef int reme_sensor_t
 Handle referencing a sensor object.
 
typedef enum _reme_sensor_image_t reme_sensor_image_t
 Image type enumeration.
 
typedef enum _reme_sensor_view_t reme_sensor_view_t
 Sensor view type.
 
typedef enum
_reme_sensor_trackmode_t 
reme_sensor_trackmode_t
 Sensor tracking mode.
 
typedef enum
_reme_sensor_trackhint_t 
reme_sensor_trackhint_t
 Sensor tracking hint.
 
typedef enum
_reme_sensor_position_t 
reme_sensor_position_t
 Predefined sensor position.
 

Enumerations

enum  _reme_sensor_image_t {
  REME_IMAGE_AUX = 0,
  REME_IMAGE_DEPTH = 1,
  REME_IMAGE_VOLUME = 2
}
 Image type enumeration. More...
 
enum  _reme_sensor_view_t {
  REME_SENSOR_VIEW_RAW = 0,
  REME_SENSOR_VIEW_RECONSTRUCTED = 1
}
 Sensor view type. More...
 
enum  _reme_sensor_trackmode_t {
  REME_SENSOR_TRACKMODE_AUTO = 0,
  REME_SENSOR_TRACKMODE_LOCAL = 1,
  REME_SENSOR_TRACKMODE_GLOBAL = 2
}
 Sensor tracking mode. More...
 
enum  _reme_sensor_trackhint_t {
  REME_SENSOR_TRACKHINT_NONE = 0,
  REME_SENSOR_TRACKHINT_USE_GLOBAL = 1,
  REME_SENSOR_TRACKHINT_DONT_TRACK = 2
}
 Sensor tracking hint. More...
 
enum  _reme_sensor_position_t {
  REME_SENSOR_POSITION_INFRONT = 0,
  REME_SENSOR_POSITION_CENTER = 1,
  REME_SENSOR_POSITION_FLOOR = 2
}
 Predefined sensor position. More...
 
reme_error_t reme_sensor_create (reme_context_t c, const char *driver, bool require_can_open, reme_sensor_t *s)
 Create a new sensor object.
 
reme_error_t reme_sensor_destroy (reme_context_t c, reme_sensor_t *s)
 Destroy a previously created sensor object.
 
reme_error_t reme_sensor_open (reme_context_t c, reme_sensor_t s)
 Open a sensor.
 
reme_error_t reme_sensor_close (reme_context_t c, reme_sensor_t s)
 Close an open sensor.
 
reme_error_t reme_sensor_set_volume (reme_context_t c, reme_sensor_t s, reme_volume_t v)
 Set the working volume.
 
reme_error_t reme_sensor_set_trackmode (reme_context_t c, reme_sensor_t s, reme_sensor_trackmode_t t)
 Set tracking mode.
 
reme_error_t reme_sensor_set_trackhint (reme_context_t c, reme_sensor_t s, reme_sensor_trackhint_t t)
 Sets a tracking hint.
 
reme_error_t reme_sensor_bind_camera_options (reme_context_t c, reme_sensor_t s, reme_options_t o)
 Access the sensor specific options that affect how the camera is opened.
 
reme_error_t reme_sensor_bind_capture_options (reme_context_t c, reme_sensor_t s, reme_options_t o)
 Access the sensor specific capture options with their current values.
 
reme_error_t reme_sensor_apply_capture_options (reme_context_t c, reme_sensor_t s, reme_options_t o)
 Apply capture options.
 
reme_error_t reme_sensor_bind_render_options (reme_context_t c, reme_sensor_t s, reme_options_t o)
 Access the sensor specific render options.
 
reme_error_t reme_sensor_apply_render_options (reme_context_t c, reme_sensor_t s, reme_options_t o)
 Apply render options.
 
reme_error_t reme_sensor_get_position (reme_context_t c, reme_sensor_t s, float *coordinates)
 Get the sensor position with respect to the world coordinate frame.
 
reme_error_t reme_sensor_get_recovery_position (reme_context_t c, reme_sensor_t s, float *coordinates)
 Get the sensor recovery position with respect to the world coordinate frame.
 
reme_error_t reme_sensor_get_incremental_position (reme_context_t c, reme_sensor_t s, float *coordinates)
 Get the incremental movement of the sensor.
 
reme_error_t reme_sensor_get_prescan_position (reme_context_t c, reme_sensor_t s, reme_sensor_position_t t, float *coordinates)
 Calculate a predefined sensor position with respect to the volume.
 
reme_error_t reme_sensor_set_position (reme_context_t c, reme_sensor_t s, const float *coordinates)
 Set the sensor position with respect to the world coordinate frame.
 
reme_error_t reme_sensor_set_incremental_position (reme_context_t c, reme_sensor_t s, const float *coordinates)
 Set the incremental movement of the sensor.
 
reme_error_t reme_sensor_set_recovery_position (reme_context_t c, reme_sensor_t s, const float *coordinates)
 Set the sensor recovery position with respect to the world coordinate frame.
 
reme_error_t reme_sensor_set_prescan_position (reme_context_t c, reme_sensor_t s, reme_sensor_position_t t)
 Position the sensor and volume with respect to each other using a predefined position.
 
reme_error_t reme_sensor_reset (reme_context_t c, reme_sensor_t s)
 Resets the sensor to identity position.
 
reme_error_t reme_sensor_get_image (reme_context_t c, reme_sensor_t s, reme_sensor_image_t it, reme_image_t i)
 Get a specific sensor image.
 
reme_error_t reme_sensor_is_image_supported (reme_context_t c, reme_sensor_t s, reme_sensor_image_t it, bool *result)
 Test if a specific image type is available.
 
reme_error_t reme_sensor_get_points (reme_context_t c, reme_sensor_t s, reme_sensor_view_t v, const float **coordinates, int *length)
 Get points corresponding to the current sensor view.
 
reme_error_t reme_sensor_get_point_normals (reme_context_t c, reme_sensor_t s, reme_sensor_view_t v, const float **coordinates, int *length)
 Get point normals corresponding to the current sensor view.
 
reme_error_t reme_sensor_get_point_colors (reme_context_t c, reme_sensor_t s, reme_sensor_view_t v, const float **channels, int *length)
 Get point colors corresponding to the current sensor view.
 
reme_error_t reme_sensor_get_track_time (reme_context_t c, reme_sensor_t s, int *track_time)
 Get the tracking time.
 
reme_error_t reme_sensor_grab (reme_context_t c, reme_sensor_t s)
 Trigger frame grabbing.
 
reme_error_t reme_sensor_prepare_images (reme_context_t c, reme_sensor_t s)
 Retrieve image data corresponding to the previous grab command for further processing.
 
reme_error_t reme_sensor_prepare_image (reme_context_t c, reme_sensor_t s, _reme_sensor_image_t i)
 Retrieve specific image data for subsequent processing.
 
reme_error_t reme_sensor_track_position (reme_context_t c, reme_sensor_t s)
 Attempts to track the sensor position.
 
reme_error_t reme_sensor_update_volume (reme_context_t c, reme_sensor_t s)
 Update the volume using the current sensor data.
 
reme_error_t reme_sensor_update_volume_selectively (reme_context_t c, reme_sensor_t s, bool update_surface, bool update_colors)
 Update the volume using the current sensor data selectively.
 
reme_error_t reme_sensor_find_floor (reme_context_t c, reme_sensor_t s, float *coordinates)
 Detect floor plane in current sensor data.
 

Detailed Description

Acquire data from real-world sensors or from file streams.

The sensor is responsible for acquiring real-world data and feeding it into the reconstruction volume. The typical life-cycle of a reme_sensor_t is sketched below.

The one-minute example illustrates the basic usage.
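
A minimal sketch of this life-cycle follows. It uses only functions documented in this group; reme_context_create, reme_context_destroy, the REME_SUCCESS convenience macro and the reconstructmesdk/reme.h header are assumed to be provided by the Context group.

#include <reconstructmesdk/reme.h>

int main() {
  // Create a context (reme_context_create is assumed from the Context group).
  reme_context_t c;
  reme_context_create(&c);

  // Create and open a sensor, probing several drivers in order.
  reme_sensor_t s;
  reme_sensor_create(c, "openni;mskinect;file", true, &s);
  reme_sensor_open(c, s);

  // Acquisition loop: grab, prepare, track and integrate into the volume.
  for (int frame = 0; frame < 100; ++frame) {
    if (!REME_SUCCESS(reme_sensor_grab(c, s)))
      continue;                          // grabbing may fail sporadically; re-try
    reme_sensor_prepare_images(c, s);
    if (REME_SUCCESS(reme_sensor_track_position(c, s)))
      reme_sensor_update_volume(c, s);
  }

  // Shut down in reverse order.
  reme_sensor_close(c, s);
  reme_sensor_destroy(c, &s);
  reme_context_destroy(&c);
  return 0;
}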

Typedef Documentation

typedef int reme_sensor_t

Handle referencing a sensor object.

A reme_sensor_t represents an RGBD sensor. The associated methods allow creating, opening, and interacting with sensors of various types.

typedef enum _reme_sensor_image_t reme_sensor_image_t

Image type enumeration.

Each sensor might provide different frame types, all of which are 2D images. Not all sensors support all frame types, and the number of supported frames can depend on the sensor configuration.

Sensor view type.

When dealing with sensor data, ReconstructMe offers two types of views. The first view, REME_SENSOR_VIEW_RAW, corresponds to the data passed as raw input to ReconstructMe. The second view type, REME_SENSOR_VIEW_RECONSTRUCTED, corresponds to a synthetic view generated by raytracing the volume from the current sensor-to-volume position.

The REME_SENSOR_VIEW_RECONSTRUCTED view is normally much smoother and has fewer holes than REME_SENSOR_VIEW_RAW, but it naturally limits the data to the extent of the reconstruction volume.

Sensor tracking mode.

Defines the basic tracking strategy to find the sensor position based on current and past sensor data.

Sensor tracking hint.

Provides optional hints for the sensor tracking module.

Predefined sensor position.

Determines a predefined sensor position with respect to the current volume. See reme_sensor_get_prescan_position and reme_sensor_set_prescan_position for more information.

Enumeration Type Documentation

Image type enumeration.

Each sensor might provide different frame types, all of which are 2D images. Not all sensors support all frame types, and the number of supported frames can depend on the sensor configuration.

Enumerator:
REME_IMAGE_AUX 

Auxiliary image if provided by the sensor. Depending on the sensor type and its configuration, the auxiliary image can be of any type; commonly it is either RGB or IR.

REME_IMAGE_DEPTH 

Depth image. RGB, 3 channels, 1 byte per channel.

REME_IMAGE_VOLUME 

Rendered image of the volume as viewed from the current sensor perspective. RGB 3 channels, 1 byte per channel.

Sensor view type.

When dealing with sensor data, ReconstructMe offers two types of views. The first view, REME_SENSOR_VIEW_RAW, corresponds to the data passed as raw input to ReconstructMe. The second view type, REME_SENSOR_VIEW_RECONSTRUCTED, corresponds to a synthetic view generated by raytracing the volume from the current sensor-to-volume position.

The REME_SENSOR_VIEW_RECONSTRUCTED view is normally much smoother and has fewer holes than REME_SENSOR_VIEW_RAW, but it naturally limits the data to the extent of the reconstruction volume.

Enumerator:
REME_SENSOR_VIEW_RAW 

Raw data view.

REME_SENSOR_VIEW_RECONSTRUCTED 

Synthetic raytraced data view.

Sensor tracking mode.

Defines the basic tracking strategy to find the sensor position based on current and past sensor data.

Enumerator:
REME_SENSOR_TRACKMODE_AUTO 

Automatic mode. Try to use local search first. If that fails, attempt a global search followed by a local search. If the last tracking attempt was unsuccessful, start with global search immediately.

REME_SENSOR_TRACKMODE_LOCAL 

Local search. Use local search only. Local search is fast and succeeds when the camera movement between two subsequent frames is small.

REME_SENSOR_TRACKMODE_GLOBAL 

Global search. Use global search followed by a fine alignment using local search. Global search is slower than local search but succeeds in cases where the camera movement between two subsequent frames is rather large.

Sensor tracking hint.

Provides optional hints for the sensor tracking module.

Enumerator:
REME_SENSOR_TRACKHINT_NONE 

No hint

REME_SENSOR_TRACKHINT_USE_GLOBAL 

Temporarily switch to global search until tracking is found again. This hint is automatically cleared once tracking is found. It is a convenient way of letting the tracking module know that it should use global search until tracking is reliably regained.

REME_SENSOR_TRACKHINT_DONT_TRACK 

Temporarily skip tracking for the current invocation. This is useful to avoid growing code complexity when tracking should not occur in a certain frame. Setting this hint and invoking reme_sensor_track_position has the same effect as not calling reme_sensor_track_position for that frame.

Predefined sensor position.

Determines a predefined sensor position with respect to the current volume. See reme_sensor_get_prescan_position and reme_sensor_set_prescan_position for more information.

Enumerator:
REME_SENSOR_POSITION_INFRONT 

Determines a sensor position in front of the volume front plane at a distance of 400 mm.

REME_SENSOR_POSITION_CENTER 

Determines a sensor position that places the sensor in the center of the volume.

REME_SENSOR_POSITION_FLOOR 

Determines a sensor position that aligns the volume on the ground floor.

Function Documentation

reme_error_t reme_sensor_create ( reme_context_t  c,
const char *  driver,
bool  require_can_open,
reme_sensor_t *  s 
)

Create a new sensor object.

Creates a new sensor. The driver argument specifies which sensor to open. The following sensor drivers are currently available

  • mskinect Microsoft Kinect for Windows and Kinect for XBox
  • openni Asus Xtion, Asus Xtion Pro Live, Primesense Carmine 1.08, Primesense Carmine 1.09
  • openni1 Same as above, but uses legacy OpenNI version 1 instead of OpenNI version 2
  • file A generic file sensor to read previously recorded data

You might specify a specific driver backend

"openni"

Alternatively you can specify a list of drivers to test, in which case it will return the first sensor that works.

"openni;mskinect;file"

Or, specify one or more sensor configuration files (see reme_sensor_bind_camera_options for camera specific options and Accessing and Modifying Options for background information about options)

"c:/drivers/asus_xtion_pro_live.txt;c:/drivers/my_file.txt"

The drivers will be tested in the given order.

Set require_can_open to true to ensure that the created sensor can actually be opened, using either the default settings or, if file paths are specified, the settings given in those files.

No matter how require_can_open is set, the sensor is returned in the closed state. Use reme_sensor_open to open it.
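
For illustration, a hedged sketch that probes physical sensors first and falls back to a file sensor; the driver list and fallback strategy are illustrative only, c is an existing context, and the REME_SUCCESS convenience macro is assumed from the Context group.

// Probe physical sensors first; fall back to a file sensor if none is found.
reme_sensor_t s;
if (!REME_SUCCESS(reme_sensor_create(c, "openni;mskinect", true, &s))) {
  // No physical sensor available; a previously recorded stream could be
  // read through a file sensor configuration instead (path not shown).
  reme_sensor_create(c, "file", false, &s);
}
// Regardless of require_can_open, the sensor is returned closed.
reme_sensor_open(c, s);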

Warning
Setting require_can_open to true can lead to longer sensor creation times, because the sensor is test-opened. If you know which sensor you are opening, set it to false.
Parameters
c	A valid context object
driver	The name of the sensor driver to instance a sensor from
require_can_open	Ensures that the returned sensor can be opened using either the default sensor options or the options given in a configuration file
s	A pointer that will receive the handle of the created sensor
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_calibration.cpp, example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_image.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_printing.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_destroy ( reme_context_t  c,
reme_sensor_t *  s 
)

Destroy a previously created sensor object.

Parameters
c	A pointer to a valid context object
s	A mutable pointer to a valid sensor handle to destroy
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_sensor.cpp, example_reconstructmesdk_sensor_printing.cpp, and example_reconstructmesdk_surface.cpp.
reme_error_t reme_sensor_open ( reme_context_t  c,
reme_sensor_t  s 
)

Open a sensor.

Opens the sensor so that frames can be grabbed from it. Camera options bound via reme_sensor_bind_camera_options take effect when the sensor is opened.

reme_error_t reme_sensor_close ( reme_context_t  c,
reme_sensor_t  s 
)

Close an open sensor.

Indicates that no more grabbing will be done from this sensor. Re-opening the sensor is possible.

Parameters
c	A valid context object
s	A valid sensor object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_sensor.cpp, example_reconstructmesdk_sensor_printing.cpp, and example_reconstructmesdk_surface.cpp.
reme_error_t reme_sensor_set_volume ( reme_context_t  c,
reme_sensor_t  s,
reme_volume_t  v 
)

Set the working volume.

The sensor will use this volume to keep track of its position, and it will also update data into this volume. By default the sensor is assigned to the first available volume.

Parameters
c	A valid context object
s	A valid sensor object
v	A valid volume object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_set_trackmode ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_trackmode_t  t 
)

Set tracking mode.

Sets the overall tracking strategy. By default REME_SENSOR_TRACKMODE_AUTO is used.

Parameters
c	A valid context object
s	A valid sensor object
t	A valid tracking mode
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_point_and_shoot_with_colors.cpp.
reme_error_t reme_sensor_set_trackhint ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_trackhint_t  t 
)

Sets a tracking hint.

The tracking hint is external, user-supplied information that supports the camera tracking module. Any tracking hint given remains active until the next call to reme_sensor_track_position.

Supplying tracking hints becomes useful when the caller has external knowledge unknown to the tracking module. For example, the caller might set REME_SENSOR_TRACKHINT_USE_GLOBAL to indicate that the tracking module should resort to global tracking in the next iteration.

Tracking hints are automatically cleared after the next call to reme_sensor_track_position.
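
For illustration, a hedged sketch that requests global search after a lost frame; c and s are an existing context and sensor.

// Ask the tracker to use global search until tracking is regained.
// The hint stays active until the next reme_sensor_track_position call.
if (reme_sensor_track_position(c, s) == REME_ERROR_TRACK_LOST) {
  reme_sensor_set_trackhint(c, s, REME_SENSOR_TRACKHINT_USE_GLOBAL);
}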

Parameters
c	A valid context object
s	A valid sensor object
t	A valid tracking hint
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_scan_tilt.cpp, and example_reconstructmesdk_sensor_multi_independent.cpp.
reme_error_t reme_sensor_bind_camera_options ( reme_context_t  c,
reme_sensor_t  s,
reme_options_t  o 
)

Access the sensor specific options that affect how the camera is opened.

Associated Protocol Buffers Specification
Depending on the sensor chosen on create, different specs are needed, since each sensor supports a different set of parameters.

For openni sensors (OpenNI 2.x)

import "rgbdsensor/videostream.proto";
package LibRGBDSensor;
// OpenNI v2.0 sensor configuration
message openni2_sensor_config {
// Default driver model
optional string driver = 1 [default = "openni"];
// Device ID
optional int32 device_id = 2 [default = 0];
optional LibRGBDSensor.videostream depth_stream = 3;
optional LibRGBDSensor.videostream aux_stream = 4;
// Map depth viewpoint to aux viewpoints.
optional bool enable_align_viewpoints = 5 [default = true];
}

For mskinect sensors

import "rgbdsensor/videostream.proto";
package LibRGBDSensor;
// Kinect sensor configuration based on MSKinect driver
message mskinect_sensor_config {
// Default driver model
optional string driver = 1 [default = "mskinect"];
// Device ID
optional int32 device_id = 2 [default = 0];
optional LibRGBDSensor.videostream depth_stream = 3;
optional LibRGBDSensor.videostream aux_stream = 4;
// Map depth viewpoint to aux viewpoints.
optional bool enable_align_viewpoints = 5 [default = true];
// Enable near mode
optional bool enable_near_mode = 6 [default = true];
}

For file sensors

import "rgbdsensor/videostream.proto";
package LibRGBDSensor;
// OpenNI sensor configuration
message file_sensor_config {
// Default driver model
optional string driver = 1 [default = "file"];
optional LibRGBDSensor.videostream depth_stream = 3;
optional LibRGBDSensor.videostream aux_stream = 4;
// Map depth viewpoint to aux viewpoints.
optional bool enable_align_viewpoints = 5 [default = false];
}

For openni sensors (OpenNI 1.x)

import "rgbdsensor/videostream.proto";
package LibRGBDSensor;
// OpenNI 1.0 sensor configuration
message openni_sensor_config {
// Default driver model
optional string driver = 1 [default = "openni1"];
// Device ID
optional int32 device_id = 2 [default = 0];
optional LibRGBDSensor.videostream depth_stream = 3;
optional LibRGBDSensor.videostream aux_stream = 4;
// Map depth viewpoint to aux viewpoints.
optional bool enable_align_viewpoints = 5 [default = true];
}

Common to all configurations above is the usage of the LibRGBDSensor.videostream

import "rgbdsensor/intrinsics.proto";
package LibRGBDSensor;
// Defines properties of a video stream
message videostream {
// Video stream type
enum stream_type {
STREAM_DEPTH = 1;
STREAM_COLOR = 2;
STREAM_IR = 3;
}
// Type of stream
optional stream_type type = 1 [default = STREAM_DEPTH];
// Enable or disable this stream
optional bool enabled = 2 [default = true];
// Defines the source of the stream
optional string source = 3 [default = ""];
// Image size description
message image_size_type {
optional int32 width = 1 [default = 640];
optional int32 height = 2 [default = 480];
};
// Defines the initial image size
optional image_size_type image_size = 4;
// Unit description
enum units_type {
// No units
UNIT_UNKNOWN = 0;
// Unit is milli-meter
UNIT_1MM = 1;
// Unit is 0.1 milli-meter
UNIT_100UM = 2;
}
// Depth dimension
optional units_type units = 5 [default = UNIT_UNKNOWN];
// Mirror image
optional bool mirror_enabled = 6 [default = false];
// Target FPS
optional int32 fps = 7 [default = 30];
// Stream intrinsics
optional LibRGBDSensor.intrinsics intrinsics = 8;
}

The referenced LibRGBDSensor.intrinsics has the following layout

package LibRGBDSensor;
// Camera intrinsics
message intrinsics {
// Image size this intrinsics applies to.
optional int32 width = 1 [default = 640];
optional int32 height = 2 [default = 480];
// Camera matrix
optional double fx = 3 [default = 571.26];
optional double fy = 4 [default = 571.26];
optional double cx = 5 [default = 320];
optional double cy = 6 [default = 240];
// Radial distortion coefficients
// Set to zero to disable certain calculations
optional double k1 = 7 [default = 0];
optional double k2 = 8 [default = 0];
optional double k3 = 9 [default = 0];
// Tangential distortion coefficients
// Set to zero to disable certain calculations
optional double p1 = 10 [default = 0];
optional double p2 = 11 [default = 0];
}
Note
When radial or tangential distortion parameters are not equal to zero, a distortion correction is performed.
Changes to camera options take effect on the next call to reme_sensor_open.
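
A hedged sketch of adjusting the depth stream size before opening the sensor. reme_options_create and reme_options_set_int are assumed to be provided by the Options group, and the field path follows the videostream specification shown above.

// Bind the camera options and change the depth stream resolution.
// reme_options_create / reme_options_set_int are assumed from the Options group.
reme_options_t o;
reme_options_create(c, &o);
reme_sensor_bind_camera_options(c, s, o);
reme_options_set_int(c, o, "depth_stream.image_size.width", 640);
reme_options_set_int(c, o, "depth_stream.image_size.height", 480);
// The new values take effect on the next reme_sensor_open.
reme_sensor_open(c, s);
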
Parameters
c	A valid context object
s	A valid sensor object
o	A valid options binding object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_calibration.cpp, example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, and example_reconstructmesdk_sensor.cpp.
reme_error_t reme_sensor_bind_capture_options ( reme_context_t  c,
reme_sensor_t  s,
reme_options_t  o 
)

Access the sensor specific capture options with their current values.

Associated Protocol Buffers Specification

Depending on the sensor chosen on create, different specs are needed, since each sensor supports a different set of parameters.

For openni sensors

import "rgbdsensor/basic_frame_support.proto";
package LibRGBDSensor;
// OpenNI sensor capture parameter description
message openni_sensor_capture_parameter {
// Frame specific information. Read-only.
optional LibRGBDSensor.basic_frame_support frame_info = 1;
// Infrared raw intensity multiplier. Read-Write.
optional int32 ir_alpha = 2 [default = 4];
}

For mskinect sensors

import "rgbdsensor/basic_frame_support.proto";
package LibRGBDSensor;
// Microsoft Kinect for Windows capture parameter description
message mskinect_sensor_capture_parameter {
// Frame specific information. Read-only.
optional LibRGBDSensor.basic_frame_support frame_info = 1;
// Enable IR projector. Read-Write.
optional bool enable_ir_projector = 2 [default = true];
// Infrared raw intensity multiplier. Read-Write.
optional int32 ir_alpha = 3 [default = 10];
// Elevation or tilt angle. Read-Write.
// Extends the view range to +/- 27 degrees. A value of 0 means perpendicular to gravity.
// The movement of the motor is performed asynchronously. Only one asynchronous tilt movement
// is allowed at any time. Too frequent changes to the tilt angle can cause damage to the motor.
// Note that the IR projector is turned off while tilting, essentially making ReconstructMe blind
// while the sensor moves.
optional int32 tilt_angle = 4 [default = 0];
// Sets/Gets a boolean that determines if automatic exposure is enabled.
optional bool auto_exposure = 5 [default = true];
// Sets/Gets boolean that determines if automatic white balance is enabled.
optional bool auto_white_balance = 6 [default = true];
// Sets/Gets the exposure time in increments of 1/10,000 of a second.
// The range is [0.0, infinity];
optional float exposure_time = 7 [default = 300];
// Sets/Gets the white balance, which is a color temperature in degrees Kelvin.
// The range is [2700, 6500];
optional float white_balance = 8 [default = 4500];
}

For file sensors

import "rgbdsensor/basic_frame_support.proto";
package LibRGBDSensor;
// File sensor capture parameter description
message file_sensor_capture_parameter {
// Frame specific information. Read-only.
optional LibRGBDSensor.basic_frame_support frame_info = 1;
}

Common to all configurations above is the frame_info field. It is mandatory for reconstruction and has the following structure

package LibRGBDSensor;
// Contains information on basic sensor image support.
message basic_frame_support {
// Image size type
message image_size {
optional int32 width = 1 [default = 640];
optional int32 height = 2 [default = 480];
};
// True when sensor supports depth stream
optional bool supports_depth = 1;
// True when sensor supports an auxiliary stream
optional bool supports_aux = 2;
// Frame size of depth image in pixels
optional image_size depth_size = 3;
// Frame size of auxiliary image in pixels
optional image_size aux_size = 4;
}
Note
The capture options are updated with their current values when reme_sensor_bind_capture_options is called.
Changing capture options without a subsequent call to reme_sensor_apply_capture_options has no effect.
Parameters
c	A valid context object
s	A valid sensor object
o	A valid options binding object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_scan_tilt.cpp, and example_reconstructmesdk_sensor.cpp.
reme_error_t reme_sensor_apply_capture_options ( reme_context_t  c,
reme_sensor_t  s,
reme_options_t  o 
)

Apply capture options.

Causes all capture options set to be applied to the given sensor. In order to set only specific options, invoke reme_options_clear on the options target before setting the new values, then invoke reme_sensor_apply_capture_options.
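
A hedged sketch that changes only the Kinect tilt angle; reme_options_create and reme_options_set_int are assumed from the Options group, and the field name follows the mskinect capture specification above.

// Apply a single capture option (Kinect tilt angle) without touching the rest:
// clear the bound options first, set the new value, then apply.
reme_options_t o;
reme_options_create(c, &o);
reme_sensor_bind_capture_options(c, s, o);
reme_options_clear(c, o);
reme_options_set_int(c, o, "tilt_angle", 10);
reme_sensor_apply_capture_options(c, s, o);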

Parameters
c	A valid context object
s	A valid sensor object
o	A valid options binding object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_scan_tilt.cpp, and example_reconstructmesdk_sensor.cpp.
reme_error_t reme_sensor_bind_render_options ( reme_context_t  c,
reme_sensor_t  s,
reme_options_t  o 
)

Access the sensor specific render options.

These options define how sensor images are rendered. Use reme_sensor_apply_render_options to apply value changes.

Associated Protocol Buffers Specification
// Render options
message render_options {
// Color type
message color4_type {
// Red channel [0..1]
required float r = 1;
// Green channel [0..1]
required float g = 2;
// Blue channel [0..1]
required float b = 3;
// Alpha channel [0..1]
required float a = 4;
}
// How shading of the volume image is performed.
enum shade_type {
// Shade points using the Blinn-Phong model
SHADE_PHONG = 0;
// Shade points according to normal direction.
SHADE_NORMALS = 1;
// Colorize pixels using input color. This is only
// a valid option if colors are supported.
SHADE_COLORS = 2;
}
// Clear color for volume images.
// By default this maps to a dark-gray color.
optional color4_type clear_color = 1;
// Diffuse color for volume images. Only used
// when SHADE_PHONG is enabled.
// By default this maps to a pastel-blue color.
optional color4_type diffuse_color = 2;
// Border color. Defines the color to apply to color borders,
// which are found in areas without complete texture/geometry
// information.
// This color only applies when using SHADE_COLORS.
optional color4_type border_color = 3;
// Shading mode.
optional shade_type shade_mode = 4 [default = SHADE_PHONG];
}
Parameters
c	A valid context object
s	A valid sensor object
o	A valid options binding object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, and example_reconstructmesdk_point_and_shoot_with_colors.cpp.
reme_error_t reme_sensor_apply_render_options ( reme_context_t  c,
reme_sensor_t  s,
reme_options_t  o 
)

Apply render options.

Causes all render options to be applied at given sensor.

Parameters
c	A valid context object
s	A valid sensor object
o	A valid options binding object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, and example_reconstructmesdk_point_and_shoot_with_colors.cpp.
reme_error_t reme_sensor_get_position ( reme_context_t  c,
reme_sensor_t  s,
float *  coordinates 
)

Get the sensor position with respect to the world coordinate frame.

Initially this is set to identity for all sensors. The position is modified when reme_sensor_track_position succeeds, or reme_sensor_set_position is called.

Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to pre-allocated Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_scan_tilt.cpp, and example_reconstructmesdk_sensor_multi_independent.cpp.
reme_error_t reme_sensor_get_recovery_position ( reme_context_t  c,
reme_sensor_t  s,
float *  coordinates 
)

Get the sensor recovery position with respect to the world coordinate frame.

Whenever the sensor loses track, it puts itself into the recovery pose and waits there for tracking to succeed. The recovery pose is updated automatically during ongoing tracking: when there is sufficient confidence that the last n frames were tracked successfully, ReconstructMe generates a new recovery pose.

Initially this is set to identity for all sensors. The position is modified when reme_sensor_track_position succeeds several times, or reme_sensor_set_recovery_position is called.

Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to pre-allocated Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_get_incremental_position ( reme_context_t  c,
reme_sensor_t  s,
float *  coordinates 
)

Get the incremental movement of the sensor.

When reme_sensor_track_position succeeds this position reflects the movement of the sensor between the last position of the sensor and the current position of the sensor.

Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to pre-allocated Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_get_prescan_position ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_position_t  t,
float *  coordinates 
)

Calculate a predefined sensor position with respect to the volume.

This method calculates a sensor position based on the value of reme_sensor_position_t. Depending on the choice of reme_sensor_position_t, it makes use of the attached volume and/or the last sensor depth-map. A usage sketch follows the list below.

The meaning of reme_sensor_position_t is as follows:

  • REME_SENSOR_POSITION_INFRONT Assume, without loss of generality, that the sensor is held horizontally pointing towards the target. The position is then chosen so that the z-axis of the world coordinate system is the natural up-direction, the sensor looks towards the positive y-axis of the world coordinate system, and the sensor is located at the center of the front face of the reconstruction volume, moved back (negative y-axis) by 400 units.
  • REME_SENSOR_POSITION_CENTER The sensor is placed in the center of the volume.
  • REME_SENSOR_POSITION_FLOOR The sensor is placed such that the volume is pinned to the floor according to reme_sensor_find_floor. This type makes use of the current depth-map to determine the floor.
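
A hedged sketch that combines reme_sensor_get_prescan_position with reme_sensor_set_position and reme_sensor_set_recovery_position, assuming the Transform convention of a 4x4 matrix stored as 16 floats.

// Compute the floor-aligned pose and use it as both the current and the
// recovery position (a 4x4 transform stored as 16 floats is assumed).
float pose[16];
if (REME_SUCCESS(reme_sensor_get_prescan_position(c, s, REME_SENSOR_POSITION_FLOOR, pose))) {
  reme_sensor_set_position(c, s, pose);
  reme_sensor_set_recovery_position(c, s, pose);
}
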
Parameters
c	A valid context object
s	A valid sensor object
t	Type of predefined position to compute
coordinates	A pointer to pre-allocated Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_NO_FLOOR	If no floor is found in the current sensor's depth-map
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_set_position ( reme_context_t  c,
reme_sensor_t  s,
const float *  coordinates 
)

Set the sensor position with respect to the world coordinate frame.

Initially this is set to identity for all sensors.

Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to constant Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, and example_reconstructmesdk_sensor_printing.cpp.
reme_error_t reme_sensor_set_incremental_position ( reme_context_t  c,
reme_sensor_t  s,
const float *  coordinates 
)

Set the incremental movement of the sensor.

Updates the world position of the sensor by the incremental position specified.

Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to constant Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_set_recovery_position ( reme_context_t  c,
reme_sensor_t  s,
const float *  coordinates 
)

Set the sensor recovery position with respect to the world coordinate frame.

Whenever the sensor loses track, it puts itself into the recovery pose and waits there for tracking to succeed. The recovery pose is updated automatically during ongoing tracking: when there is sufficient confidence that the last n frames were tracked successfully, ReconstructMe generates a new recovery pose.

Initially this is set to identity for all sensors. The position is modified when reme_sensor_track_position succeeds several times, or reme_sensor_set_recovery_position is called.

Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to constant Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_sensor_printing.cpp.
reme_error_t reme_sensor_set_prescan_position ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_position_t  t 
)

Position the sensor and volume with respect to each other using a predefined position.

Initially the sensor position is identity for all sensors. By calling this method the sensor position and recovery position change to an auto-calculated sensor position based on the value of reme_sensor_position_t.

This method is a convenience helper: it computes the prescan position and applies it as both the sensor position and the recovery position.

Please refer to reme_sensor_get_prescan_position for an in-depth explanation of the reme_sensor_position_t choices. A short usage sketch follows below.
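
// Prefer pinning the volume to the detected floor; fall back to the
// in-front placement when no floor is visible in the current depth image.
if (reme_sensor_set_prescan_position(c, s, REME_SENSOR_POSITION_FLOOR) == REME_ERROR_NO_FLOOR) {
  reme_sensor_set_prescan_position(c, s, REME_SENSOR_POSITION_INFRONT);
}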

Parameters
c	A valid context object
s	A valid sensor object
t	Type of predefined position to set
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_NO_FLOOR	If no floor is found in the current sensor's depth-map
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_reset ( reme_context_t  c,
reme_sensor_t  s 
)

Resets the sensor to identity position.

Forces the sensor to lose track.

Parameters
c	A valid context object
s	A valid sensor object
Return values
REME_ERROR_SUCCESS	On success, or when the corresponding volume is empty
REME_ERROR_UNSPECIFIED	Otherwise
Examples:
example_reconstructmesdk_scan_tilt.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_get_image ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_image_t  it,
reme_image_t  i 
)

Get a specific sensor image.

Each sensor might provide different frame types. Not all sensors support all frame types, and the number of supported frames can depend on the sensor configuration. See reme_sensor_image_t for a complete enumeration of available image types.

Memory Management Rules Exception

The returned image remains valid until the sensor is destroyed or the dimension of the image changes. The pointer is recycled internally, which means that it will point to different values each time the sensor images are updated.
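
A hedged sketch of fetching the rendered volume view; reme_image_create and reme_image_get_bytes are assumed to be provided by the Image group.

// Fetch the rendered volume view for display.
reme_image_t img;
reme_image_create(c, &img);
reme_sensor_get_image(c, s, REME_IMAGE_VOLUME, img);

const void *pixels = 0;
int num_bytes = 0;
reme_image_get_bytes(c, img, &pixels, &num_bytes);
// pixels holds RGB data (3 channels, 1 byte per channel) and is recycled
// on the next image update, so copy it if it needs to outlive the frame.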

Parameters
c	A valid context object
s	A valid sensor object
it	Image type to access
i	A valid image object to receive the image data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_calibration.cpp, example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_image.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_printing.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_is_image_supported ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_image_t  it,
bool *  result 
)

Test if a specific image type is available.

Each sensor might provide different frame types. Not all sensors support all frame types, and the number of supported frames can depend on the sensor configuration. See reme_sensor_image_t for a complete enumeration of available image types.

Parameters
c	A valid context object
s	A valid sensor object
it	Image type to access
result	Whether the image type is supported or not
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_get_points ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_view_t  v,
const float **  coordinates,
int *  length 
)

Get points corresponding to the current sensor view.

Depending on the choice of v (reme_sensor_view_t), this returns either the points of the raw sensor data (REME_SENSOR_VIEW_RAW) or the points of the synthetic raytraced view (REME_SENSOR_VIEW_RECONSTRUCTED).

The points are represented as an array of floats where each point consists of 4 coordinates Px Py Pz Pw Px Py Pz Pw ... . The w component is always zero. The i-th point starts at index i * 4 of the returned coordinate array.

The number of points returned corresponds to the number of pixels of the native sensor. That is, if your sensor has a resolution of 640x480 (cols x rows), the number of returned points is 640 * 480. The points are returned in row-wise order. Points that do not contain valid data are marked with a sentinel value (NaN) in their x-coordinate. To access the point corresponding to the pixel at row i and column j, use i * cols * 4 + j * 4, where cols is the number of pixel columns of the native sensor.
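
A hedged sketch of iterating the reconstructed view's points while skipping invalid entries (isnan requires <math.h>).

// Iterate all valid points of the reconstructed view. Each point occupies
// 4 floats; invalid points carry NaN in their x-coordinate.
const float *points = 0;
int length = 0;
reme_sensor_get_points(c, s, REME_SENSOR_VIEW_RECONSTRUCTED, &points, &length);

for (int i = 0; i < length; i += 4) {
  if (isnan(points[i]))
    continue; // sentinel: no valid data at this pixel
  float x = points[i], y = points[i + 1], z = points[i + 2];
  /* ... use x, y, z ... */
}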

Parameters
c	A valid context object
s	A valid sensor object
v	View type specification
coordinates	A mutable pointer to constant point data
length	The number of coordinates returned. To get the number of points divide this value by 4.
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_get_point_normals ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_view_t  v,
const float **  coordinates,
int *  length 
)

Get point normals corresponding to the current sensor view.

Depending on the choice of v (reme_sensor_view_t), this returns either the point normals of the raw sensor data (REME_SENSOR_VIEW_RAW) or the point normals of the synthetic raytraced view (REME_SENSOR_VIEW_RECONSTRUCTED).

The point normals are represented as an array of floats where each normal consists of 4 coordinates Px Py Pz Pw Px Py Pz Pw ... . The w component is always zero. The i-th normal starts at index i * 4 of the returned coordinate array.

The number of normals returned corresponds to the number of pixels of the native sensor. That is, if your sensor has a resolution of 640x480 (cols x rows), the number of returned normals is 640 * 480. The normals are returned in row-wise order. Normals that do not contain valid data are marked with a sentinel value (NaN) in their x-coordinate. To access the normal corresponding to the pixel at row i and column j, use i * cols * 4 + j * 4, where cols is the number of pixel columns of the native sensor.

Parameters
c	A valid context object
s	A valid sensor object
v	View type specification
coordinates	A mutable pointer to constant normal data
length	The number of coordinates returned. To get the number of normals divide this value by 4.
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_get_point_colors ( reme_context_t  c,
reme_sensor_t  s,
reme_sensor_view_t  v,
const float **  channels,
int *  length 
)

Get point colors corresponding to the current sensor view.

Depending on the choice of v (reme_sensor_view_t), this returns either the point colors of the raw sensor data (REME_SENSOR_VIEW_RAW) or the point colors of the synthetic raytraced view (REME_SENSOR_VIEW_RECONSTRUCTED).

Colors are only available if the current compiled context supports colorization of vertices.

The point colors are represented as an array of floats where each color consists of 4 channels r g b a r g b a ... . The a component is always zero. The i-th color starts at index i * 4 of the returned array. The range for each channel is [0..1].

The number of colors returned corresponds to the number of pixels of the native sensor. That is, if your sensor has a resolution of 640x480 (cols x rows), the number of returned colors is 640 * 480. The colors are returned in row-wise order. To access the color corresponding to the pixel at row i and column j, use i * cols * 4 + j * 4, where cols is the number of pixel columns of the native sensor.

Parameters
c	A valid context object
s	A valid sensor object
v	View type specification
channels	A mutable pointer to constant color data
length	The number of channels returned. To get the number of colors divide this value by 4.
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_NO_COLOR_SUPPORT	If no color is available
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_get_track_time ( reme_context_t  c,
reme_sensor_t  s,
int *  track_time 
)

Get the tracking time.

The tracking time corresponds to the number of frames in which tracking succeeded. If tracking fails, the counter becomes negative.

Parameters
c	A valid context object
s	A valid sensor object
track_time	The tracking time
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_grab ( reme_context_t  c,
reme_sensor_t  s 
)

Trigger frame grabbing.

Provides a synchronization point to trigger image generation for all image types. This method is intended to be fast.

In order to synchronize frame grabbing from multiple sensors, call this method in sequence for each sensor before preparing any images via reme_sensor_prepare_images, as sketched below.
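
A hedged sketch with two hypothetical sensor handles s1 and s2.

// Grab from both sensors first, then prepare; this keeps the two
// frames as close in time as possible.
reme_sensor_grab(c, s1);
reme_sensor_grab(c, s2);

reme_sensor_prepare_images(c, s1);
reme_sensor_prepare_images(c, s2);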

Parameters
c	A valid context object
s	A valid sensor object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_FAILED_TO_GRAB	Failed to grab from the sensor. This is not necessarily an error; you might re-try.
REME_ERROR_UNSPECIFIED	Otherwise
Examples:
example_reconstructmesdk_calibration.cpp, example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_image.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_printing.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_prepare_images ( reme_context_t  c,
reme_sensor_t  s 
)

Retrieve image data corresponding to the previous grab command for further processing.

Updates the internal state of all images and prepares the required data structures on the computation device.

Parameters
c	A valid context object
s	A valid sensor object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_printing.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_prepare_image ( reme_context_t  c,
reme_sensor_t  s,
_reme_sensor_image_t  i 
)

Retrieve specific image data for subsequent processing.

In case REME_IMAGE_AUX or REME_IMAGE_DEPTH is passed, this method fetches the data into internal memory. In case REME_IMAGE_VOLUME is passed, the previously prepared REME_IMAGE_DEPTH is uploaded to the computation device for subsequent processing (reme_sensor_track_position, reme_sensor_update_volume).

This method is especially useful (compared to reme_sensor_prepare_images) when only the depth and auxiliary images are required. For example, when recording there is no need for REME_IMAGE_VOLUME, so it can be skipped and no time is wasted waiting for data to be uploaded to the computation device.
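
A hedged sketch of the recording use case (REME_SUCCESS is assumed from the Context group).

// Recording use case: only the auxiliary and depth images are needed,
// so REME_IMAGE_VOLUME is never prepared or uploaded.
if (REME_SUCCESS(reme_sensor_grab(c, s))) {
  reme_sensor_prepare_image(c, s, REME_IMAGE_AUX);
  reme_sensor_prepare_image(c, s, REME_IMAGE_DEPTH);
  /* ... write the prepared images to disk ... */
}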

Parameters
c	A valid context object
s	A valid sensor object
i	Image type to prepare
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_calibration.cpp, example_reconstructmesdk_image.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, and example_reconstructmesdk_sensor.cpp.
reme_error_t reme_sensor_track_position ( reme_context_t  c,
reme_sensor_t  s 
)

Attempts to track the sensor position.

Tries to track the sensor movement by matching the current depth data against the perspective from the last position. Initially the sensor position is the identity position, unless otherwise specified.

The tracking behaviour is affected by the tracking strategy (reme_sensor_set_trackmode) and any external tracking hint (reme_sensor_set_trackhint). Any tracking hint will be cleared after calling this method.

Parameters
c	A valid context object
s	A valid sensor object
Return values
REME_ERROR_SUCCESS	On tracking success, or when the corresponding volume is empty
REME_ERROR_TRACK_LOST	When tracking did not succeed. In this case the sensor is repositioned to the latest recovery pose.
REME_ERROR_UNSPECIFIED	Otherwise
Examples:
example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_printing.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_update_volume ( reme_context_t  c,
reme_sensor_t  s 
)

Update the volume using the current sensor data.

Uses the current sensor position as the perspective to update the volume. If color support is enabled this method will also update the colors. Use reme_sensor_update_volume_selectively to change that behaviour.

Parameters
c	A valid context object
s	A valid sensor object
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_UNSPECIFIED	On failure
Examples:
example_reconstructmesdk_colorize.cpp, example_reconstructmesdk_one_minute.cpp, example_reconstructmesdk_point_and_shoot_with_colors.cpp, example_reconstructmesdk_recorder.cpp, example_reconstructmesdk_scan_tilt.cpp, example_reconstructmesdk_sensor_multi_independent.cpp, example_reconstructmesdk_sensor_printing.cpp, example_reconstructmesdk_sensor_threaded.cpp, example_reconstructmesdk_surface.cpp, and example_reconstructmesdk_volume.cpp.
reme_error_t reme_sensor_update_volume_selectively ( reme_context_t  c,
reme_sensor_t  s,
bool  update_surface,
bool  update_colors 
)

Update the volume using the current sensor data selectively.

Uses the current sensor position as the perspective to update the volume.
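
For example, to update only the geometric part:

// Update only the geometry, leaving the color part of the volume untouched.
reme_sensor_update_volume_selectively(c, s, true, false);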

Parameters
c	A valid context object
s	A valid sensor object
update_surface	If true, updates the geometric part of the volume
update_colors	If true, updates the color part of the volume
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_NO_COLOR_SUPPORT	When update_colors is selected but color support is disabled
REME_ERROR_UNSPECIFIED	On failure
reme_error_t reme_sensor_find_floor ( reme_context_t  c,
reme_sensor_t  s,
float *  coordinates 
)

Detect floor plane in current sensor data.

This method attempts to detect the floor plane in the current sensor data. It uses the sensor's depth data from the last invocation of reme_sensor_prepare_images or reme_sensor_prepare_image with REME_IMAGE_DEPTH as input and if successful returns a coordinate system of the floor with respect to the sensor coordinate system.

The algorithm works best when a large portion of the image is covered by floor data and the sensor is held without roll (i.e. no rotation around the sensor's z-axis). Note that vertical, front-facing walls can be erroneously detected as floors if they make up the major part of the sensor image.

The floor is returned as a coordinate frame with the following properties (a usage sketch follows the list):

  • the origin is located at the intersection of the sensor view direction and the estimated floor plane
  • the z-axis is normal to the floor plane and points towards the natural ceiling
  • the x-axis is aligned with the sensor's x-axis
  • the y-axis is formed by the cross product of the former two axes.
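
A hedged sketch, again assuming the Transform convention of a 4x4 matrix stored as 16 floats.

// Detect the floor after grabbing and preparing a depth image.
float floor_to_sensor[16];
reme_error_t err = reme_sensor_find_floor(c, s, floor_to_sensor);
if (err == REME_ERROR_NO_FLOOR) {
  /* not enough floor visible; try again with another frame */
}
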
Parameters
c	A valid context object
s	A valid sensor object
coordinates	A pointer to pre-allocated Transform data
Return values
REME_ERROR_SUCCESS	On success
REME_ERROR_NO_FLOOR	If no floor is found in the current sensor's depth-map
REME_ERROR_UNSPECIFIED	On failure