Getting Started in C
The C/C++ examples require CMake and a development environment. On Windows this is provided by Visual Studio 2017 or newer. On Linux (Ubuntu):
sudo apt-get install build-essential cmake
To get started, clone the repository, then build and run the first example using CMake:
git clone https://github.com/NVIDIA-Omniverse/ovrtx.git
cd ovrtx/examples/c/minimal
cmake -B build
Then, on Windows:
cmake --build build --config Release
.\build\Release\minimal.exe
On Linux:
cmake --build build --config Release
./build/minimal
The minimal example shows how to create the renderer, load an OpenUSD scene, and render a single image, copying the result back to the CPU and writing it out as a PNG.
The resulting image will be written to ./out.png and can be inspected with any image viewer.
Note that the first time a program built against ovrtx is run, it will compile and cache the necessary shaders, which may take some time depending on your system. Subsequent runs use the cached shaders and start much faster.
Installation
CMake
ovrtx binary distributions can be found on the GitHub Releases page and contain a CMake config. To use one directly, download the appropriate package for your system from the Releases page, point CMAKE_PREFIX_PATH at the directory where you extracted the archive, and call find_package(ovrtx) from your CMakeLists.txt.
For other build systems, download the appropriate package for your system from the Releases page; the headers are in the include directory and the libraries are in lib and bin, in both static and dynamic flavors.
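For example, compiling directly against an extracted package on Linux might look like the sketch below. The install path, library name, and flags shown here are assumptions based on the package layout described under Runtime Packaging and Deployment, not an exact recipe:

# Sketch only: assumes the archive was extracted to /opt/ovrtx and that you are
# linking the dynamic flavor (libovrtx-dynamic.so ships under bin/).
cc -I/opt/ovrtx/include main.c \
   -L/opt/ovrtx/lib -L/opt/ovrtx/bin -lovrtx-dynamic \
   -Wl,-rpath,/opt/ovrtx/bin -o minimal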
The simplest way to add ovrtx as a dependency to your project is using CMake FetchContent:
macro(ovrtx_fetch)
    set(FETCHCONTENT_QUIET FALSE)

    # Override FetchContent's base directory to share large deps among examples.
    # Uses the directory where ovrtx.cmake lives, ensuring all examples share the same _deps.
    # If copying this project to your own workspace, delete this line or override it.
    if(NOT DEFINED CACHE{FETCHCONTENT_BASE_DIR})
        set(FETCHCONTENT_BASE_DIR "${_OVRTX_CMAKE_DIR}/_deps" CACHE PATH "Shared FetchContent directory")
    endif()

    # Platform-specific package selection
    if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
        set(OVRTX_PACKAGE_SYSTEM "windows-x86_64")
        set(OVRTX_HASH "40ea3e4b2b180117cb3d8488686d461b840435c0d94c3792351c1bcf216bffac")
    elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
        if(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
            set(OVRTX_PACKAGE_SYSTEM "manylinux_2_35_aarch64")
            set(OVRTX_HASH "13b11c0c62f04b82f624db91274ea323d7a0e362defce625f9302982df5b5a54")
        elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
            set(OVRTX_PACKAGE_SYSTEM "manylinux_2_35_x86_64")
            set(OVRTX_HASH "a37115a2da1528c4fa302b84fa243b6d0004f5db781420d92944676543add79d")
        else()
            message(FATAL_ERROR "Unsupported system: ${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR}")
        endif()
    else()
        message(FATAL_ERROR "Unsupported system: ${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR}")
    endif()

    include(FetchContent)
    FetchContent_Declare(
        ovrtx
        URL "https://github.com/NVIDIA-Omniverse/ovrtx/releases/download/v0.1.0/ovrtx@0.1.0.${OVRTX_PACKAGE_SYSTEM}.zip"
        URL_HASH SHA256=${OVRTX_HASH}
        DOWNLOAD_EXTRACT_TIMESTAMP TRUE
    )
    FetchContent_MakeAvailable(ovrtx)

    # Make ovrtx findable by find_package
    list(APPEND CMAKE_PREFIX_PATH ${ovrtx_SOURCE_DIR})
    find_package(ovrtx REQUIRED)
endmacro()

# Setup runtime dependencies for a target (DLL copying, junctions, rpath)
function(ovrtx_setup_runtime TARGET_NAME)
    if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
        # Copy ovrtx DLL to build directory
        add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
            COMMAND ${CMAKE_COMMAND} -E copy_if_different
                "${ovrtx_SOURCE_DIR}/bin/ovrtx-dynamic.dll"
                "$<TARGET_FILE_DIR:${TARGET_NAME}>"
            COMMENT "Copying ovrtx-dynamic.dll to build directory"
        )
    endif()
    # The remaining platform handling (Windows directory junctions and Linux
    # rpath setup) is omitted here; see the full function in ovrtx.cmake in the
    # repository.
endfunction()
Note that the ovrtx_fetch macro and ovrtx_setup_runtime helper above are provided for convenience in ovrtx.cmake, in the examples/c/cmake directory of the repository.
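As a rough sketch, a consuming project's CMakeLists.txt could use those helpers as follows. The ovrtx::ovrtx imported target name is an assumption made for illustration; check the CMake config shipped in the binary distribution for the exact name exported by find_package(ovrtx):

cmake_minimum_required(VERSION 3.24)
project(my_ovrtx_app LANGUAGES C)

# ovrtx.cmake provides ovrtx_fetch() and ovrtx_setup_runtime(); adjust the path
# to wherever you copied it in your project.
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/ovrtx.cmake)
ovrtx_fetch()

add_executable(my_ovrtx_app main.c)
# NOTE: assumed imported target name; it may differ in your ovrtx release.
target_link_libraries(my_ovrtx_app PRIVATE ovrtx::ovrtx)

# Copy the ovrtx runtime next to the built executable (or set up rpath on Linux).
ovrtx_setup_runtime(my_ovrtx_app)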
Runtime Packaging and Deployment
ovrtx requires several libraries and other runtime dependencies to be present and discoverable at runtime. These are all included in the ovrtx binary distribution under the bin directory:
bin/
├── libovrtx-dynamic.so / ovrtx-dynamic.dll
├── cache/
├── library/
├── libs/
├── mdl/
├── plugins/
├── rendering-data/
└── usd_plugins/
The ovrtx dynamic library will automatically load the other dependencies at runtime if it is placed alongside them as in the binary distribution. If you need to deploy your application with a different layout, you can point ovrtx to the correct paths using the ovrtx_config_entry_binary_package_root_path() helper function when configuring the renderer:
ovrtx_renderer_config_entry_t config_entries[] = {
    ovrtx_config_entry_binary_package_root_path(
        ovx_string("/path/where/bin/contents/live"))
};

ovrtx_config_t config;
config.entries = config_entries;
config.entry_count = sizeof(config_entries) / sizeof(config_entries[0]);

ovrtx_renderer_t* renderer;
ovrtx_result_t result = ovrtx_create_renderer(&config, &renderer);
Note that when linking ovrtx statically, you MUST provide the binary package root path, or ovrtx will not be able to find its required dependencies at runtime.
Minimal Example
#include <ovrtx/ovrtx_config.h>
#include <ovrtx/ovrtx_types.h>
#include <ovrtx/ovrtx.h>

#include <chrono>
#include <cstdio>
#include <cstring>
#include <stdexcept>
#include <thread>

#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define THROW_ON_ERROR(RESULT, OPERATION) \
    do { \
        if (RESULT.status == OVRTX_API_ERROR) { \
            ovx_string_t error = ovrtx_get_last_error(); \
            char error_msg[512]; \
            if (error.ptr && error.length > 0) { \
                snprintf(error_msg, \
                         sizeof(error_msg), \
                         "ovrtx %s failed: %.*s", \
                         OPERATION, \
                         static_cast<int>(error.length), \
                         error.ptr); \
            } else { \
                snprintf(error_msg, \
                         sizeof(error_msg), \
                         "ovrtx %s failed", \
                         OPERATION); \
            } \
            throw std::runtime_error(error_msg); \
        } \
    } while (0)
// Find the handle of the given output in the given set of outputs
static ovrtx_rendered_output_handle_t
find_output(ovrtx_render_product_set_outputs_t const& outputs,
            char const* output_to_find) {
    ovrtx_rendered_output_handle_t output_handle = -1;
    for (size_t i = 0; i < outputs.output_count; ++i) {
        ovrtx_render_product_output_t const& product_output =
            outputs.outputs[i];
        for (size_t f = 0; f < product_output.output_frame_count; ++f) {
            ovrtx_render_product_frame_output_t const& frame =
                product_output.output_frames[f];
            for (size_t v = 0; v < frame.render_var_count; ++v) {
                ovrtx_render_product_render_var_output_t const& var =
                    frame.output_render_vars[v];
                if (var.render_var_name.ptr &&
                    strncmp(var.render_var_name.ptr,
                            output_to_find,
                            var.render_var_name.length) == 0) {
                    output_handle = var.output_handle;
                    break;
                }
            }
        }
    }
    if (output_handle == -1) {
        throw std::runtime_error("LdrColor output not found");
    }
    return output_handle;
}
int main() {
    ovrtx_renderer_t* renderer = nullptr;
    ovrtx_result_t result;

    // Create the renderer, optionally providing configuration settings.
    // In this case we need no configuration.
    ovrtx_config_t config{};
    result = ovrtx_create_renderer(&config, &renderer);
    THROW_ON_ERROR(result, "create_renderer");

    // Load a USD layer into the renderer.
    //
    // As well as just passing a URI to an existing layer, we can pass a USDA
    // string in order to compose a Stage at runtime. This can be very useful
    // for dynamically creating the RenderProducts etc. that define the render
    // output rather than editing the original layer to add them.
    //
    // A real application might want to load the USD layer and traverse it to
    // find existing RenderProducts and/or Cameras, and allow the user to
    // select which one to render and which RenderVars to output.
    //
    // Here, we know there's a suitable camera at /World/Camera, so we'll just
    // create a RenderProduct referencing that and add the LdrColor RenderVar to
    // get some output.
    ovrtx_usd_handle_t usd_handle{};
    ovrtx_usd_input_t usd_input{};
    char const* usd_url = "https://omniverse-content-production.s3.us-west-2.amazonaws.com/Samples/Robot-OVRTX/robot-ovrtx.usda";
    usd_input.usd_file_path = {usd_url, strlen(usd_url)};
    ovrtx_enqueue_result_t enqueue_result =
        ovrtx_add_usd(renderer, usd_input, {"", 0}, &usd_handle);

    // This operation is asynchronous as loading the USD may take a long time.
    // We'll just poll every 100ms till it's done.
    ovrtx_op_wait_result_t wait_result;
    result = ovrtx_wait_op(
        renderer, enqueue_result.op_index, ovrtx_timeout_t{0}, &wait_result);
    THROW_ON_ERROR(result, "wait_op");
    while (ovrtx_wait_op(renderer, enqueue_result.op_index, ovrtx_timeout_t{0},
                         &wait_result).status == OVRTX_API_TIMEOUT) {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }

    // We render a frame by stepping the renderer.
    //
    // Any sensors whose exposures end during this step will generate a frame
    // that will be available in the step result. Since the camera in the loaded
    // USD layer is instantaneous (does not specify motion blur or rolling
    // shutter), it will generate a frame every time the renderer is stepped.
    //
    // To step the renderer we need to tell ovrtx which RenderProducts we're
    // interested in, which in this case is the RenderProduct we defined in the
    // loading layer.
    ovrtx_render_product_set_t render_products = {};
    ovx_string_t render_product_str = {"/Render/Camera", strlen("/Render/Camera")};
    render_products.render_products = &render_product_str;
    render_products.num_render_products = 1;

    ovrtx_step_result_handle_t step_result_handle = 0;
    enqueue_result =
        ovrtx_step(renderer, render_products, 1.0 / 60.0, &step_result_handle);
    THROW_ON_ERROR(enqueue_result, "step");

    // Wait for the render to complete. Here we'll just block until it's done.
    result = ovrtx_wait_op(renderer,
                           enqueue_result.op_index,
                           ovrtx_timeout_infinite,
                           &wait_result);
    THROW_ON_ERROR(result, "wait_op");

    ovrtx_render_product_set_outputs_t outputs = {};
    result = ovrtx_fetch_results(
        renderer, step_result_handle, ovrtx_timeout_infinite, &outputs);
    THROW_ON_ERROR(result, "fetch_results");

    // Find LdrColor in outputs
    ovrtx_rendered_output_handle_t ldrcolor_output_handle =
        find_output(outputs, "LdrColor");

    // Map rendered output so that it can be accessed on the CPU
    ovrtx_map_output_description_t map_desc = {};
    map_desc.device_type = OVRTX_MAP_DEVICE_TYPE_CPU;
    ovrtx_rendered_output_t rendered_output = {};
    result = ovrtx_map_rendered_output(renderer,
                                       ldrcolor_output_handle,
                                       &map_desc,
                                       ovrtx_timeout_infinite,
                                       &rendered_output);
    THROW_ON_ERROR(result, "map_rendered_output");

    // The output is returned as a DLTensor
    DLTensor const& tensor = rendered_output.buffer.dl;
    int width = static_cast<int>(tensor.shape[1]);
    int height = static_cast<int>(tensor.shape[0]);
    stbi_write_png("out.png",
                   width,
                   height,
                   /* components = */ 4,
                   tensor.data,
                   /* row stride in bytes = */ 4 * width);

    // Unmap output
    ovrtx_cuda_sync_t no_sync = {};
    result = ovrtx_unmap_rendered_output(
        renderer, rendered_output.map_handle, no_sync);
    THROW_ON_ERROR(result, "unmap_rendered_output");

    // Clean up resources (ovrtx will warn if results are leaked)
    result = ovrtx_destroy_results(renderer, step_result_handle);
    result = ovrtx_destroy_renderer(renderer);
    THROW_ON_ERROR(result, "destroy_renderer");

    return 0;
}
Next Steps
Explore more Examples, including the Vulkan Interop example with real-time GPU rendering.
See the C API Reference for complete documentation of the C API.