theta_input = self.data[:, 0] * np.pi / 180     # incident angle, degrees -> radians
theta_fit = np.arctan(self.data[:, 1] / 0.95)   # focal_length = 0.95
distort_data, _ = curve_fit(func1, theta_input, theta_fit)

In summary, we obtained the distortion parameters by curve fitting.
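For reference, the model being fit here is the usual fisheye polynomial in the incident angle θ. The body of func1 is not shown in this excerpt, but judging from the θ powers evaluated in the C++ code of Section 3, it has the form

θ_d = θ + k1·θ³ + k2·θ⁵ + k3·θ⁷ + k4·θ⁹

where (k1, k2, k3, k4) are the four distortion parameters returned in distort_data.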
@param K Camera intrinsic matrix \f$\cameramatrix{K}\f$.
@param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
@param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 1-channel or 1x1 3-channel
@param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
@param size Undistorted image size.
@param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2. See convertMaps() for details.
@param map1 The first output map.
@param map2 The second output map.
*/
CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
    const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);

The camera intrinsic matrix is

K = | f/dx   0      cx |
    | 0      f/dy   cy |
    | 0      0      1  |

where f/dx is the ratio of the camera focal length f to the sensor parameter dx; physically, dx is the real size of one pixel, e.g. in mm/pixel. (cx, cy) is the principal point, i.e. the pixel coordinate where the optical axis intersects the image plane. Now the question: why does the function need both the fisheye camera's intrinsics and the intrinsics of the output image, and how are the two related? At first, many people will simply set the two intrinsic matrices to the same values, i.e. both to the fisheye camera's, as in the figure below. In the code, the intrinsics of the undistorted image are deep-copied from the fisheye intrinsics.
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
cv::Mat mapx_open, mapy_open;
cv::Mat intrinsic_undis;
fish_intrinsic.copyTo(intrinsic_undis);
//intrinsic_undis.at<float>(0, 2) *= 2;
//intrinsic_undis.at<float>(1, 2) *= 2;
cv::fisheye::initUndistortRectifyMap(
    fish_intrinsic, m_undis2fish_params, R, intrinsic_undis,
    cv::Size(intrinsic_undis.at<float>(0, 2) * 2,
             intrinsic_undis.at<float>(1, 2) * 2),
    CV_32FC1, mapx_open, mapy_open);
cv::Mat test;
cv::remap(disImg[3], test, mapx_open, mapy_open, cv::INTER_LINEAR);

Left: the fisheye image; right: the undistorted image.
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
cv::Mat mapx_open, mapy_open;
cv::Mat intrinsic_undis;
fish_intrinsic.copyTo(intrinsic_undis);
intrinsic_undis.at<float>(0, 2) *= 2;
intrinsic_undis.at<float>(1, 2) *= 2;
cv::fisheye::initUndistortRectifyMap(
    fish_intrinsic, m_undis2fish_params, R, intrinsic_undis,
    cv::Size(intrinsic_undis.at<float>(0, 2) * 2,
             intrinsic_undis.at<float>(1, 2) * 2),
    CV_32FC1, mapx_open, mapy_open);
cv::Mat test;
cv::remap(disImg[3], test, mapx_open, mapy_open, cv::INTER_LINEAR);

Here the principal point of the undistorted image's intrinsics is doubled, and the generated image size is doubled with it. Even so, the figure above still does not capture the complete large black square on the right, so the principal point of the undistorted intrinsics and the output resolution need to be enlarged further:
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
cv::Mat mapx_open, mapy_open;
cv::Mat intrinsic_undis;
fish_intrinsic.copyTo(intrinsic_undis);
intrinsic_undis.at<float>(0, 2) *= 4;
intrinsic_undis.at<float>(1, 2) *= 4;
cv::fisheye::initUndistortRectifyMap(
    fish_intrinsic, m_undis2fish_params, R, intrinsic_undis,
    cv::Size(intrinsic_undis.at<float>(0, 2) * 2,
             intrinsic_undis.at<float>(1, 2) * 2),
    CV_32FC1, mapx_open, mapy_open);
cv::Mat test;
cv::remap(disImg[3], test, mapx_open, mapy_open, cv::INTER_LINEAR);

Now the principal point of the undistorted image's intrinsics has been enlarged to 4 times that of the fisheye intrinsics, and the width and height of the generated image are enlarged 4 times as well, 16 times the pixel count overall; only then does the large square barely fit completely. Corner extraction relies on image-processing algorithms, and running them on an image this large is clearly very inefficient.
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
cv::Mat mapx_open, mapy_open;
cv::Mat intrinsic_undis;
fish_intrinsic.copyTo(intrinsic_undis);
intrinsic_undis.at<float>(0, 0) /= 4;
intrinsic_undis.at<float>(1, 1) /= 4;
/*intrinsic_undis.at<float>(0, 2) *= 4;
intrinsic_undis.at<float>(1, 2) *= 4;*/
cv::fisheye::initUndistortRectifyMap(
    fish_intrinsic, m_undis2fish_params, R, intrinsic_undis,
    cv::Size(intrinsic_undis.at<float>(0, 2) * 2,
             intrinsic_undis.at<float>(1, 2) * 2),
    CV_32FC1, mapx_open, mapy_open);
cv::Mat test;
cv::remap(disImg[3], test, mapx_open, mapy_open, cv::INTER_LINEAR);

Left: the fisheye image; right: the undistorted image, both at 1280*960. As the figure shows, when we only shrink the camera focal length, we can see much more. Although the undistorted image is small, just 1280*960, the complete grid is visible. In this section we discussed how adjusting the principal-point and focal-length parameters of the OpenCV API initUndistortRectifyMap affects the undistorted image. In Section 3 we will start from the principle of the undistortion algorithm and implement it in C++. The reason is simple: OpenCV only provides a whole-image undis2fish mapping, while the AVM view transformation needs undis2fish for individual pixels, so we have to implement this undistortion step ourselves. Conclusion: shrinking the focal length enlarges the FOV, presenting more content at a smaller resolution, which also looks clearer.
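This conclusion follows directly from pinhole geometry. For an undistorted (pinhole) image of width W rendered with focal length fx in pixels, the horizontal half field of view θ satisfies

tan(θ) = (W / 2) / fx

so at a fixed output width, dividing fx by 4, as the code above does, quadruples the tangent of the visible half-angle; the same holds vertically with H and fy.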
/* func: warp from the undistorted image to the fisheye (distorted) image
@param fish_scale: resolution scale between the fisheye and undistorted images
@param f_dx: f/dx
@param f_dy: f/dy
@param large_center_h: undis image center y
@param large_center_w: undis image center x
@param fish_center_h: fish image center y
@param fish_center_w: fish image center x
@param undis_param: factory param
@param x: input coordinate x on the undis image
@param y: input coordinate y on the undis image
*/
void warpPointOpencv(cv::Vec2f &warp_xy, float map_center_h, float map_center_w,
                     float x_, float y_, float scale);

cv::Vec2f warpUndist2Fisheye(float fish_scale, float f_dx, float f_dy,
                             float large_center_h, float large_center_w,
                             float fish_center_h, float fish_center_w,
                             cv::Vec4d undis_param, float x, float y) {
    f_dx *= fish_scale;
    f_dy *= fish_scale;
    float y_ = (y - large_center_h) / f_dy; // normalized plane
    float x_ = (x - large_center_w) / f_dx;
    float r_ = static_cast<float>(sqrt(pow(x_, 2) + pow(y_, 2)));

    // Look-up table variant:
    /*int num = atan(r_) / atan(m_d) * 1024;
    float angle_distorted = m_Lut[num];*/

    float angle_undistorted = atan(r_); // theta
    float angle_undistorted_p2 = angle_undistorted * angle_undistorted;
    float angle_undistorted_p3 = angle_undistorted_p2 * angle_undistorted;
    float angle_undistorted_p5 = angle_undistorted_p2 * angle_undistorted_p3;
    float angle_undistorted_p7 = angle_undistorted_p2 * angle_undistorted_p5;
    float angle_undistorted_p9 = angle_undistorted_p2 * angle_undistorted_p7;
    float angle_distorted = static_cast<float>(
        angle_undistorted + undis_param[0] * angle_undistorted_p3 +
        undis_param[1] * angle_undistorted_p5 +
        undis_param[2] * angle_undistorted_p7 +
        undis_param[3] * angle_undistorted_p9);

    // scale = r_dis (on the camera image plane) / r_undis (on the normalized plane)
    float scale = angle_distorted / (r_ + 0.00001f);

    cv::Vec2f warp_xy;
    float xx = (x - large_center_w) / fish_scale;
    float yy = (y - large_center_h) / fish_scale;
    warpPointOpencv(warp_xy, fish_center_h, fish_center_w, xx, yy, scale);
    return warp_xy;
}

void warpPointOpencv(cv::Vec2f &warp_xy, float map_center_h, float map_center_w,
                     float x_, float y_, float scale) {
    warp_xy[0] = x_ * scale + map_center_w;
    warp_xy[1] = y_ * scale + map_center_h;
}

Starting from this code, let us walk through the algorithm step by step, from the basics up.
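As a usage sketch (every number below is an illustrative placeholder, not a real calibration), this is how a single undistorted-image pixel is mapped back onto the fisheye image; sampling the fisheye image at the returned coordinate, e.g. bilinearly, is exactly what cv::remap does for every entry of the map:

cv::Vec4d undis_param(0.1, -0.05, 0.01, -0.001); // placeholder k1..k4 from the curve fit
cv::Vec2f fish_xy = warpUndist2Fisheye(
    1.0f,             // fish_scale (placeholder)
    320.f, 320.f,     // f/dx, f/dy (placeholder)
    480.f, 640.f,     // undis image center (y, x)
    480.f, 640.f,     // fish image center (y, x)
    undis_param,
    200.f, 100.f);    // query pixel (x, y) on the undis image
// fish_xy is the (x, y) position on the original fisheye image whose ray
// corresponds to pixel (200, 100) of the undistorted image.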
# forward
self.distor_para, _ = curve_fit(self.func, self.data[:, 0], self.data[:, 1])
# inverse
f_inverse_para, _ = curve_fit(self.func_inverse, self.data[:, 1], self.data[:, 0])

Computing fish2undis differs slightly from the undis2fish process (3.1, 3.2), but both look for the mapping between the undistorted angle θ and the distorted radius r_d, because the fisheye plane is the image we actually get: in the end we still have to locate points on this original fisheye image.
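The bodies of self.func and self.func_inverse are not shown in this excerpt; inferred from the powers evaluated in the C++ code below, the two fitted models have roughly these forms, the forward one being the fisheye polynomial from 3.1 and the inverse one a plain polynomial in r_d:

forward: r_d = θ + k1·θ³ + k2·θ⁵ + k3·θ⁷ + k4·θ⁹
inverse: θ ≈ r_d + p1·r_d² + p2·r_d³ + p3·r_d⁴ + p4·r_d⁵

The implementation code: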
/* func: warp from the fisheye (distorted) image to the undistorted image */
cv::Vec2f CalibrateInit::warpFisheye2Undist(float fish_scale, float f_dx, float f_dy,
                                            float undis_center_h, float undis_center_w,
                                            float fish_center_h, float fish_center_w,
                                            cv::Vec4d undis_param, float x, float y) {
    // f_dx *= fish_scale;
    // f_dy *= fish_scale;
    float y_ = (y - fish_center_h) / f_dy; // normalized plane
    float x_ = (x - fish_center_w) / f_dx;
    float r_distorted = static_cast<float>(sqrt(pow(x_, 2) + pow(y_, 2)));

    float r_distorted_p2 = r_distorted * r_distorted;
    float r_distorted_p3 = r_distorted_p2 * r_distorted;
    float r_distorted_p4 = r_distorted_p2 * r_distorted_p2;
    float r_distorted_p5 = r_distorted_p2 * r_distorted_p3;
    float angle_undistorted = static_cast<float>(
        r_distorted + undis_param[0] * r_distorted_p2 +
        undis_param[1] * r_distorted_p3 +
        undis_param[2] * r_distorted_p4 +
        undis_param[3] * r_distorted_p5);

    // scale = r_undis (on the normalized plane) / r_dis (on the camera image plane)
    float r_undistorted = tanf(angle_undistorted);
    float scale = r_undistorted / (r_distorted + 0.00001f);

    cv::Vec2f warp_xy;
    float xx = (x - fish_center_w) * fish_scale;
    float yy = (y - fish_center_h) * fish_scale;
    warpPointInverse(warp_xy, undis_center_h, undis_center_w, xx, yy, scale);
    return warp_xy;
}

void CalibrateInit::warpPointInverse(cv::Vec2f &warp_xy, float map_center_h,
                                     float map_center_w, float x_, float y_,
                                     float scale) {
    warp_xy[0] = x_ * scale + map_center_w;
    warp_xy[1] = y_ * scale + map_center_h;
}
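A simple way to validate the two warps together (again with placeholder numbers, and assuming a CalibrateInit instance named calib) is a round trip: push a fisheye pixel through warpFisheye2Undist, push the result through warpUndist2Fisheye with the forward coefficients, and check that it lands back near the original coordinate; the residual measures how well the inverse polynomial fit matches the forward model.

cv::Vec4d forward_param(0.1, -0.05, 0.01, -0.001);    // placeholder k1..k4 (forward fit)
cv::Vec4d inverse_param(0.2, -0.03, 0.004, -0.0002);  // placeholder p1..p4 (inverse fit)
cv::Vec2f undis_xy = calib.warpFisheye2Undist(1.0f, 320.f, 320.f,
                                              480.f, 640.f, 480.f, 640.f,
                                              inverse_param, 300.f, 250.f);
cv::Vec2f fish_xy = warpUndist2Fisheye(1.0f, 320.f, 320.f,
                                       480.f, 640.f, 480.f, 640.f,
                                       forward_param, undis_xy[0], undis_xy[1]);
// fish_xy should come back close to (300, 250).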