您好:
我在网上看到您的opencv透视变换的博客,
https://www.cnblogs.com/jsxyhelu/p/4219564.html,
我是opencv小菜鸟一个,现在想要得到一个图片变形之后保存,整个图片信息不丢失,即四个角的信息不丢失应该怎么做?原图中某一点在新图中坐标应该怎么计算?万望不吝赐教,不胜感激,万分感谢。
你好:
我按照您的代码和网上找到的python代码(
https://blog.csdn.net/fengxueniu/article/details/77964375)修改并实现了基本的透视变换,但基本原理是将原图test0.png加边界,边界值与透视之后的背景一致,使图片看起来信息未丢失。但图像旋转之后的预期效果如test1.png所示,而实际生成的图片如test2.png所示,现在想要实现从test0.png到test1.png的变换并求test0.png图中某一点坐标值在test1.png中位置,应该怎么做?对于透视变换不是特别理解。万望不吝赐教,万分感谢。
图片与代码附注如下。
=》
现有代码
#-*- coding:utf-8 -*-
#-*- coding:utf-8 -*-
import cv2
import numpy
as np
import random
def rad(x):
    """Convert an angle given in degrees to radians."""
    return x * np.pi / 180
img = cv2.imread("test0.png")
cv2.imshow("original", img)

# Pad the image so the warped content stays inside the visible canvas.
# BUG FIX: the Python binding's signature is
# copyMakeBorder(src, top, bottom, left, right, borderType[, dst[, value]]),
# so the border colour must be passed as the keyword `value=`; passed
# positionally it lands in the `dst` slot and the white border is lost.
img = cv2.copyMakeBorder(img, 100, 100, 100, 100,
                         cv2.BORDER_CONSTANT, value=(255, 255, 255))

# NOTE: img.shape is (rows, cols, channels), so `w` here is actually the
# image HEIGHT and `h` the WIDTH.  The rest of the script uses them with
# this swapped meaning (e.g. dsize=(h, w) at the warp call), so the names
# are kept unchanged for compatibility.
w, h = img.shape[0:2]
cv2.imshow("original1", img)
# Pick a random camera pose: rotations about x/y/z plus the field of view,
# each drawn uniformly from [30, 60] degrees.
# (Fixed values used while debugging: anglex=45, angley=45, anglez=0, fov=42.)
anglex, angley, anglez, fov = (random.uniform(30, 60) for _ in range(4))
print(anglex, angley, anglez, fov)
# Distance from the camera to the image plane.  fov/2 is the half view
# angle; z is chosen so that the whole (padded) image exactly fills the
# view frustum at that angle.
half_diagonal = np.sqrt(w ** 2 + h ** 2) / 2
z = half_diagonal / np.tan(rad(fov / 2))
# Homogeneous 4x4 rotation matrices about the x, y and z axes.
# BUG FIX: the original rx had -sin in BOTH off-diagonal entries
# ([0, cos, -sin], [0, -sin, cos]), which is not an orthogonal matrix
# (its determinant is cos^2 - sin^2, not 1) — it sheared/scaled the image
# instead of rotating it.  A proper rotation about x is used below.
# The repeated `angle*np.pi/180` expressions are replaced with the file's
# own rad() helper (identical arithmetic).
rx = np.array([[1, 0, 0, 0],
               [0, np.cos(rad(anglex)), -np.sin(rad(anglex)), 0],
               [0, np.sin(rad(anglex)), np.cos(rad(anglex)), 0],
               [0, 0, 0, 1]], np.float32)

ry = np.array([[np.cos(rad(angley)), 0, np.sin(rad(angley)), 0],
               [0, 1, 0, 0],
               [-np.sin(rad(angley)), 0, np.cos(rad(angley)), 0],
               [0, 0, 0, 1]], np.float32)

# NOTE(review): rz uses the ([cos, sin], [-sin, cos]) sign convention,
# i.e. it rotates by -anglez under the usual right-handed convention.
# That is still a valid rotation (only the direction differs), so it is
# kept exactly as in the original.
rz = np.array([[np.cos(rad(anglez)), np.sin(rad(anglez)), 0, 0],
               [-np.sin(rad(anglez)), np.cos(rad(anglez)), 0, 0],
               [0, 0, 1, 0],
               [0, 0, 0, 1]], np.float32)
# Combined rotation: x, then y, then z (applied right-to-left to a point).
r = rx.dot(ry).dot(rz)

# Generate the four point pairs for getPerspectiveTransform.
# pcenter is the image centre as a homogeneous (·,·,z,w) vector; the four
# corners are shifted by it so the rotation happens about the centre.
# NOTE(review): `w` = rows and `h` = cols here (see the shape unpacking
# above), yet pcenter puts h/2 in the first slot while the corners put w
# there — the two coordinate roles look swapped between pcenter and
# p1..p4.  This is suspected to contribute to the wrong output the post
# describes (test2.png vs the expected test1.png) — TODO confirm.
pcenter = np.array([h/2, w/2, 0, 0], np.float32)
p1 = np.array([0, 0, 0, 0], np.float32) - pcenter
p2 = np.array([w, 0, 0, 0], np.float32) - pcenter
p3 = np.array([0, h, 0, 0], np.float32) - pcenter
p4 = np.array([w, h, 0, 0], np.float32) - pcenter

# Rotate each centred corner with the combined matrix.
dst1 = r.dot(p1)
dst2 = r.dot(p2)
dst3 = r.dot(p3)
dst4 = r.dot(p4)
list_dst = [dst1, dst2, dst3, dst4]

# Source quad (the un-rotated corners) and the output buffer that will
# receive the projected (2-D) destination corners.
org = np.array([[0, 0],
                [w, 0],
                [0, h],
                [w, h]], np.float32)
dst = np.zeros((4, 2), np.float32)
# Project the rotated corners back onto the imaging plane (pinhole model):
# a point at depth d maps to p * z / (z - d), then the centre offset is
# added back to return to image coordinates.
for idx, corner in enumerate(list_dst):
    dst[idx, 0] = corner[0] * z / (z - corner[2]) + pcenter[0]
    dst[idx, 1] = corner[1] * z / (z - corner[2]) + pcenter[1]
warpR = cv2.getPerspectiveTransform(org, dst)
print(org)
print(dst)

# BUG FIX: the Python binding's signature is
# warpPerspective(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]),
# so the fill colour must be passed as the keyword `borderValue=`; passed
# positionally it lands in the `dst` slot.
# dsize is (width, height); with the swapped names above that is (h, w).
result = cv2.warpPerspective(img, warpR, (h, w), borderValue=(0, 255, 0))
print(result.shape)
cv2.imshow("result", result)

# imencode(...)[1].tofile(...) writes the PNG even for paths that
# cv2.imwrite cannot handle (e.g. non-ASCII paths on Windows).
cv2.imencode('.png', result)[1].tofile('test2.png')

c = cv2.waitKey(0)
cv2.destroyAllWindows()
这个问题的出现,在于对透视变换原理的理解;可能还缺少一些调试的技巧。
int _tmain(int argc, _TCHAR* argv[])
{
	// Load the source image and bail out early if it is missing.
	// BUG FIX: the original called copyMakeBorder() BEFORE the src.data
	// check, so a failed imread crashed inside copyMakeBorder instead of
	// returning cleanly.
	Mat src = imread("E:/sandbox/test0.png");
	if (!src.data)
		return 0;
	// Pad the image so the warped quad stays inside the canvas.
	copyMakeBorder(src, src, 10, 10, 10, 10, BORDER_CONSTANT);

	// Source quad: the four (padded) corners of the region to warp.
	vector<Point> not_a_rect_shape;
	not_a_rect_shape.push_back(Point(10, 10));
	not_a_rect_shape.push_back(Point(74, 10));
	not_a_rect_shape.push_back(Point(74, 77));
	not_a_rect_shape.push_back(Point(10, 77));
	cv::Point2f src_vertices[4];
	src_vertices[0] = not_a_rect_shape[0];
	src_vertices[1] = not_a_rect_shape[1];
	src_vertices[2] = not_a_rect_shape[2];
	src_vertices[3] = not_a_rect_shape[3];

	// Destination quad: where those corners should land after the warp.
	Point2f dst_vertices[4];
	dst_vertices[0] = Point(0, 10);
	dst_vertices[1] = Point(64, 0);
	dst_vertices[2] = Point(84, 77);
	dst_vertices[3] = Point(10, 87);

	Mat warpMatrix = getPerspectiveTransform(src_vertices, dst_vertices);
	cv::Mat rotated;
	// BUG FIX: the original passed rotated.size() as the output size, but
	// `rotated` is still empty (0x0) at the point the argument is
	// evaluated, so the result was empty.  Use the source size instead.
	warpPerspective(src, rotated, warpMatrix, src.size(), INTER_LINEAR, BORDER_CONSTANT);

	// Display the original and the warped image.
	cv::namedWindow("Original Image");
	cv::imshow("Original Image", src);
	cv::namedWindow("warp perspective");
	cv::imshow("warp perspective", rotated);
	cv::waitKey();
	return 0;
}
结果:
目前方向:图像拼接融合、图像识别 联系方式:jsxyhelu@foxmail.com