datetime:2025/12/29 20:53
author:nzb

This project comes from mujoco_learning.

Acquiring sensor data

The offset of a sensor's readings in sensordata comes from the sensor_adr array in mjModel, indexed by the sensor's id. Note that sensors differ in how many values they output: some produce a single value, others three. The number of values a sensor outputs is given by sensor_dim in mjModel.

*Demo:*
std::vector<float> get_sensor_data(const mjModel *model, const mjData *data,
                                   const std::string &sensor_name) {
  int sensor_id = mj_name2id(model, mjOBJ_SENSOR, sensor_name.c_str());
  if (sensor_id == -1) {
    std::cout << "sensor not found" << std::endl;
    return std::vector<float>();
  }
  // offset of this sensor's output in data->sensordata
  int data_pos = model->sensor_adr[sensor_id];
  // sensor_dim gives the number of values this sensor outputs
  std::vector<float> sensor_data(model->sensor_dim[sensor_id]);
  for (size_t i = 0; i < sensor_data.size(); i++) {
    sensor_data[i] = data->sensordata[data_pos + i]; // mjtNum (double) -> float
  }
  return sensor_data;
}
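
A usage sketch (the sensor name "ang_vel" matches the gyro read in the full program below):

auto data = get_sensor_data(m, d, "ang_vel");
std::cout << "data:";
for (float v : data)
  std::cout << "  " << v;
std::cout << std::endl;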

Other sensor attributes can be read directly from the corresponding arrays in mjModel, as sketched below.
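For example, a minimal sketch that prints a few of these attributes (the field names are the sensor_* arrays declared in mjModel):

// print the static attributes of one sensor stored in mjModel
void print_sensor_info(const mjModel *model, int sensor_id) {
  std::cout << "type: " << model->sensor_type[sensor_id]       // mjtSensor enum value
            << "  dim: " << model->sensor_dim[sensor_id]       // number of output values
            << "  adr: " << model->sensor_adr[sensor_id]       // offset into sensordata
            << "  noise: " << model->sensor_noise[sensor_id]   // noise standard deviation
            << "  cutoff: " << model->sensor_cutoff[sensor_id] // clamp value (0 = disabled)
            << std::endl;
}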

Reading the camera image

  A camera usually comes from the model file, or you can create one and control it manually; the interactive view in base, for example, is a manually created camera. The steps to read a camera are:

  1. Get the camera ID
  2. Determine the image size
  3. Render
  4. Read the image
  5. Convert the image to a cv::Mat with OpenCV
    MJAPI void mjr_readPixels(unsigned char* rgb, float* depth,
    mjrRect viewport, const mjrContext* con);
    
    This reads the rendered frame into an RGB image and a depth image.

    Demo of grabbing a camera view. Camera initialization:
    mjvCamera cam2; // a global variable in base
    int camID = mj_name2id(m, mjOBJ_CAMERA, "cam2armor1");
    if (camID == -1) {
      std::cerr << "Camera not found" << std::endl;
    } else {
      std::cout << "Camera ID: " << camID << std::endl;
      // get the camera position
      const double *cam_pos = &m->cam_pos[3 * camID];
      std::cout << "Camera Position: (" << cam_pos[0] << ", " << cam_pos[1] << ", " << cam_pos[2] << ")" << std::endl;
      // get the camera's vertical field of view
      double cam_fovy = m->cam_fovy[camID];
      std::cout << "Camera FOV Y: " << cam_fovy << " degrees" << std::endl;
      // initialize the abstract camera
      mjv_defaultCamera(&cam2);
      // setting the camera id and type is enough for MuJoCo to locate it
      cam2.fixedcamid = camID;
      cam2.type = mjCAMERA_FIXED;
    }
    
    Grabbing the image:
    // set the image size; it must not exceed the OpenGL framebuffer size,
    // otherwise the image will be corrupted
    int width = 1200;
    int height = 900;
    mjrRect viewport2 = {0, 0, width, height};
    // update and render with MuJoCo
    mjv_updateCamera(m, d, &cam2, &scn);
    mjr_render(viewport2, &scn, &con);
    // read the image once rendering is done
    unsigned char *rgbBuffer = new unsigned char[width * height * 3];
    float *depthBuffer = new float[width * height]; // depth image
    mjr_readPixels(rgbBuffer, depthBuffer, viewport2, &con);
    cv::Mat image(height, width, CV_8UC3, rgbBuffer);
    // flip vertically to match the OpenGL coordinate system
    cv::flip(image, image, 0);
    // mjr_readPixels returns RGB while OpenCV expects BGR, so swap the
    // channels (COLOR_RGB2BGR and COLOR_BGR2RGB perform the same swap)
    cv::cvtColor(image, image, cv::COLOR_RGB2BGR);
    cv::imshow("Image", image);
    cv::waitKey(1);
    // free the buffers
    delete[] rgbBuffer;
    delete[] depthBuffer;
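
    The depth values returned by mjr_readPixels are nonlinear and normalized to
    [0, 1], so they must be linearized before use. A minimal sketch, assuming
    MuJoCo's convention that the clip planes are vis.map.znear and vis.map.zfar
    scaled by stat.extent (the full program below instead hard-codes assumed
    clip planes):

    #include "opencv4/opencv2/opencv.hpp"
    #include <mujoco/mujoco.h>

    // convert MuJoCo's nonlinear depth buffer (CV_32FC1, values in [0, 1])
    // to linear depth in meters
    cv::Mat depth_to_meters(const mjModel *model, const cv::Mat &depth_raw) {
      float znear = model->vis.map.znear * model->stat.extent; // near clip (m)
      float zfar = model->vis.map.zfar * model->stat.extent;   // far clip (m)
      // invert the standard OpenGL depth mapping, elementwise:
      // linear = znear * zfar / (zfar - z * (zfar - znear))
      cv::Mat linear = znear * zfar / (zfar - (zfar - znear) * depth_raw);
      return linear;
    }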
    

Code

  • sensor_data.cc
#include "opencv4/opencv2/opencv.hpp"
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

#include <GLFW/glfw3.h>
#include <mujoco/mujoco.h>

// MuJoCo data structures
mjModel *m = NULL; // MuJoCo model
mjData *d = NULL;  // MuJoCo data
mjvCamera cam;     // abstract camera
mjvOption opt;     // visualization options
mjvScene scn;      // abstract scene
mjrContext con;    // custom GPU context

// mouse interaction
bool button_left = false;
bool button_middle = false;
bool button_right = false;
double lastx = 0;
double lasty = 0;

// keyboard callback
void keyboard(GLFWwindow *window, int key, int scancode, int act, int mods) {
  // backspace: reset simulation
  if (act == GLFW_PRESS && key == GLFW_KEY_BACKSPACE) {
    mj_resetData(m, d);
    mj_forward(m, d);
  }
}

// mouse button callback
void mouse_button(GLFWwindow *window, int button, int act, int mods) {
  // update button state
  button_left =
      (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT) == GLFW_PRESS);
  button_middle =
      (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_MIDDLE) == GLFW_PRESS);
  button_right =
      (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_RIGHT) == GLFW_PRESS);

  // update mouse position
  glfwGetCursorPos(window, &lastx, &lasty);
}

// mouse move callback
void mouse_move(GLFWwindow *window, double xpos, double ypos) {
  // no buttons down: nothing to do
  if (!button_left && !button_middle && !button_right) {
    return;
  }

  // compute mouse displacement, save
  double dx = xpos - lastx;
  double dy = ypos - lasty;
  lastx = xpos;
  lasty = ypos;

  // get current window size
  int width, height;
  glfwGetWindowSize(window, &width, &height);

  // get shift key state
  bool mod_shift = (glfwGetKey(window, GLFW_KEY_LEFT_SHIFT) == GLFW_PRESS ||
                    glfwGetKey(window, GLFW_KEY_RIGHT_SHIFT) == GLFW_PRESS);

  // determine action based on mouse button
  mjtMouse action;
  if (button_right) {
    action = mod_shift ? mjMOUSE_MOVE_H : mjMOUSE_MOVE_V;
  } else if (button_left) {
    action = mod_shift ? mjMOUSE_ROTATE_H : mjMOUSE_ROTATE_V;
  } else {
    action = mjMOUSE_ZOOM;
  }

  // move camera
  mjv_moveCamera(m, action, dx / height, dy / height, &scn, &cam);
}

// scroll callback
void scroll(GLFWwindow *window, double xoffset, double yoffset) {
  // emulate vertical mouse motion = 5% of window height
  mjv_moveCamera(m, mjMOUSE_ZOOM, 0, -0.05 * yoffset, &scn, &cam);
}

std::vector<float> get_sensor_data(const mjModel *model, const mjData *data,
                                   const std::string &sensor_name) {
  int sensor_id = mj_name2id(model, mjOBJ_SENSOR, sensor_name.c_str());
  if (sensor_id == -1) {
    std::cout << "no found sensor" << std::endl;
    return std::vector<float>();
  }
  int data_pos = model->sensor_adr[sensor_id];
  std::vector<float> sensor_data(model->sensor_dim[sensor_id]);
  for (size_t i = 0; i < sensor_data.size(); i++) {
    sensor_data[i] = data->sensordata[data_pos + i];
  }
  return sensor_data;
}

// main function
int main(int argc, const char **argv) {

    char error[1000] = "Could not load model";
    m = mj_loadXML("../../../../API-MJCF/pointer.xml", 0, error, 1000);
    if (!m) {
        mju_error("Load model error: %s", error);
    }

    // make data
    d = mj_makeData(m);

    // init GLFW
    if (!glfwInit()) {
        mju_error("Could not initialize GLFW");
    }

    // create window, make OpenGL context current, request v-sync
    GLFWwindow *window = glfwCreateWindow(1200, 900, "Demo", NULL, NULL);
    glfwMakeContextCurrent(window);
    glfwSwapInterval(1);

    // initialize visualization data structures
    mjv_defaultCamera(&cam);
    mjv_defaultOption(&opt);
    mjv_defaultScene(&scn);
    mjr_defaultContext(&con);

    // create scene and context
    mjv_makeScene(m, &scn, 2000);
    mjr_makeContext(m, &con, mjFONTSCALE_150);

    // install GLFW mouse and keyboard callbacks
    glfwSetKeyCallback(window, keyboard);
    glfwSetCursorPosCallback(window, mouse_move);
    glfwSetMouseButtonCallback(window, mouse_button);
    glfwSetScrollCallback(window, scroll);

    // camera initialization
    mjvCamera cam2; // a global variable in base
    int camID = mj_name2id(m, mjOBJ_CAMERA, "this_camera");
    if (camID == -1) {
        std::cerr << "Camera not found" << std::endl;
    } else {
        std::cout << "Camera ID: " << camID << std::endl;
        // get the camera position
        const double *cam_pos = &m->cam_pos[3 * camID];
        std::cout << "Camera Position: (" << cam_pos[0] << ", " << cam_pos[1]
                    << ", " << cam_pos[2] << ")" << std::endl;
        // get the camera's vertical field of view
        double cam_fovy = m->cam_fovy[camID];
        std::cout << "Camera FOV Y: " << cam_fovy << " degrees" << std::endl;
        // initialize the abstract camera
        mjv_defaultCamera(&cam2);
        // setting the id and type is enough for MuJoCo to locate it
        cam2.fixedcamid = camID;
        cam2.type = mjCAMERA_FIXED;
    }

    while (!glfwWindowShouldClose(window)) {
        // record the start of this iteration for the time sync below
        auto step_start = std::chrono::high_resolution_clock::now();

        d->ctrl[1] = 2;
        mj_step(m, d);

        // auto data = get_sensor_data(m, d, "linvel");
        auto data = get_sensor_data(m, d, "ang_vel");
        std::cout << "data:";
        for (auto v : data) // renamed to avoid shadowing the global mjData *d
            std::cout << "  " << v;
        std::cout << std::endl;

        // set the image size; it must not exceed the OpenGL framebuffer size,
        // otherwise the image will be corrupted
        int width = 640;
        int height = 480;
        mjrRect viewport2 = {0, 0, width, height};
        // update the MuJoCo rendering
        mjv_updateCamera(m, d, &cam2, &scn); // point the scene at the camera to render
        mjr_render(viewport2, &scn, &con);
        // read the image once rendering is done
        unsigned char *rgbBuffer = new unsigned char[width * height * 3];
        float *depthBuffer = new float[width * height];
        mjr_readPixels(rgbBuffer, depthBuffer, viewport2, &con);

        // wrap the raw depth buffer in a float Mat
        cv::Mat depth_float(height, width, CV_32FC1, depthBuffer);
        // depth range to visualize (0-8 meters)
        const float min_depth_m = 0.0f; // minimum depth (0 m)
        const float max_depth_m = 8.0f; // maximum depth (8 m)
        // assumed camera clip planes (adjust to your actual settings)
        const float near_clip = 0.1f; // near clip plane (m)
        const float far_clip = 50.0f; // far clip plane (m)
        // convert the nonlinear depth-buffer values to linear depth in meters
        cv::Mat linear_depth_m = depth_float.clone();
        linear_depth_m = far_clip * near_clip /
                        (far_clip - (far_clip - near_clip) * linear_depth_m);

        // clip depth to the 0-8 m range
        cv::Mat depth_clipped = linear_depth_m.clone();
        depth_clipped.setTo(min_depth_m,
                            linear_depth_m < min_depth_m); // below 0 -> 0
        depth_clipped.setTo(max_depth_m,
                            linear_depth_m > max_depth_m); // above 8 -> 8

        // map 0-8 m to pixel values 0-255 (closer is brighter)
        cv::Mat depth_visual;
        // mapping parameter: scale = 255 / (max_depth_m - min_depth_m)
        float scale = 255.0f / (max_depth_m - min_depth_m);
        // invert so that smaller distances map to larger (brighter) values
        cv::Mat inverted_depth = max_depth_m - depth_clipped;
        inverted_depth.convertTo(depth_visual, CV_8UC1, scale);
        cv::flip(depth_visual, depth_visual, 0);
        // show the depth image
        cv::imshow("Depth", depth_visual);

        cv::Mat image(height, width, CV_8UC3, rgbBuffer);
        // flip vertically to match the OpenGL coordinate system
        cv::flip(image, image, 0);
        // mjr_readPixels returns RGB while OpenCV expects BGR, so swap the
        // channels (COLOR_RGB2BGR and COLOR_BGR2RGB perform the same swap)
        cv::cvtColor(image, image, cv::COLOR_RGB2BGR);
        cv::imshow("Image", image);
        cv::waitKey(1);
        // free the buffers
        delete[] rgbBuffer;
        delete[] depthBuffer;

        // time sync: sleep out the remainder of this iteration's budget (5 timesteps)
        auto current_time = std::chrono::high_resolution_clock::now();
        double elapsed_sec =
            std::chrono::duration<double>(current_time - step_start).count();
        double time_until_next_step = m->opt.timestep * 5 - elapsed_sec;
        if (time_until_next_step > 0.0) {
            auto sleep_duration = std::chrono::duration<double>(time_until_next_step);
            std::this_thread::sleep_for(sleep_duration);
        }

        // get framebuffer viewport
        mjrRect viewport = {0, 0, 0, 0};
        glfwGetFramebufferSize(window, &viewport.width, &viewport.height);

        // update scene and render
        mjv_updateScene(m, d, &opt, NULL, &cam, mjCAT_ALL, &scn);
        mjr_render(viewport, &scn, &con);

        // swap OpenGL buffers (blocking call due to v-sync)
        glfwSwapBuffers(window);

        // process pending GUI events, call GLFW callbacks
        glfwPollEvents();
    }

    // free visualization storage
    mjv_freeScene(&scn);
    mjr_freeContext(&con);

    // free MuJoCo model and data
    mj_deleteData(d);
    mj_deleteModel(m);

    // terminate GLFW (crashes with Linux NVidia drivers)
    #if defined(__APPLE__) || defined(_WIN32)
    glfwTerminate();
    #endif

    return 0;
}
  • CMakeLists.txt
cmake_minimum_required(VERSION 3.20)
project(MUJOCO_T)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/simulate)

# MuJoCo built and installed from source; used from its install location

# 设置 MuJoCo 的路径
set(MUJOCO_PATH "/home/nzb/programs/mujoco-3.3.0")
# 包含 MuJoCo 的头文件
include_directories(${MUJOCO_PATH}/include)
# 设置 MuJoCo 的库路径
link_directories(${MUJOCO_PATH}/lib)
set(MUJOCO_LIB ${MUJOCO_PATH}/lib/libmujoco.so)

find_package(OpenCV REQUIRED)

add_executable(sensor_data sensor_data.cc )
# link against the installed MuJoCo
target_link_libraries(sensor_data ${MUJOCO_LIB} glut GL GLU glfw ${OpenCV_LIBS})
