Browse source

Merge opencv code

syh 5 years ago
Parent
commit
8d0265f9d2

+ 9 - 8
jeecg-boot-module-system/pom.xml

@@ -157,11 +157,6 @@
         <version>3.17</version>
         </dependency>
 
-        <dependency>
-            <groupId>com.alibaba</groupId>
-            <artifactId>easyexcel</artifactId>
-            <version>1.1.2-beat1</version>
-        </dependency>
 
         <dependency>
             <groupId>com.bstek.ureport</groupId>
@@ -175,10 +170,16 @@
             <version>3.17</version>
         </dependency>-->
         <!--<dependency>
-            <groupId>cn.com.ctop</groupId>
-            <artifactId>moudule-opencv</artifactId>
-            <version>4.20.0</version>
+            <groupId>com.test</groupId>
+            <artifactId>opencv</artifactId>
+            <scope>system</scope>
+            <systemPath>${basedir}/docs/opencv-420.jar</systemPath>
         </dependency>-->
+        <dependency>
+            <groupId>cn.com.ctop</groupId>
+            <artifactId>opencv</artifactId>
+            <version>4.2.0</version>
+        </dependency>
     </dependencies>
 
     <dependencyManagement>
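
The commented-out system-scope entry is replaced by a regular cn.com.ctop:opencv:4.2.0 dependency. That artifact presumably wraps the OpenCV 4.2.0 Java bindings; the matching native library still has to be loadable at runtime, which is why every class added below calls System.loadLibrary before touching a Mat. A minimal sketch of that pattern (the class name OpenCvNative is illustrative, not part of the commit):

    package org.jeecg.modules.demo.opencv;

    import org.opencv.core.Core;

    public class OpenCvNative {
        // The native opencv_java420 library must be resolvable via java.library.path;
        // call this once per JVM before constructing any Mat.
        public static void load() {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        }
    }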

+ 282 - 0
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/Avutils.java

@@ -0,0 +1,282 @@
+package org.jeecg.modules.demo.opencv;
+
+public class Avutils {
+    /**
+     * planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+     */
+    public static int AV_PIX_FMT_YUV420P = 0;
+    /**
+     * packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+     */
+    public static int AV_PIX_FMT_YUYV422 = 1;
+    /**
+     * packed RGB 8:8:8, 24bpp, RGBRGB...
+     */
+    public static int AV_PIX_FMT_RGB24 = 2;
+    /**
+     * packed RGB 8:8:8, 24bpp, BGRBGR...
+     */
+    public static int AV_PIX_FMT_BGR24 = 3;
+    /**
+     * planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+     */
+    public static int AV_PIX_FMT_YUV422P = 4;
+    /**
+     * planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+     */
+    public static int AV_PIX_FMT_YUV444P = 5;
+    /**
+     * planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+     */
+    public static int AV_PIX_FMT_YUV410P = 6;
+    /**
+     * planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+     */
+    public static int AV_PIX_FMT_YUV411P = 7;
+    /**
+     * Y, 8bpp
+     */
+    public static int AV_PIX_FMT_GRAY8 = 8;
+    /**
+     * Y, 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+     */
+    public static int AV_PIX_FMT_MONOWHITE = 9;
+    /**
+     * Y, 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+     */
+    public static int AV_PIX_FMT_MONOBLACK = 10;
+    /**
+     * 8 bits with a PIX_FMT_RGB32 palette
+     */
+    public static int AV_PIX_FMT_PAL8 = 11;
+    /**
+     * planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+     */
+    public static int AV_PIX_FMT_YUVJ420P = 12;
+    /**
+     * planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+     */
+    public static int AV_PIX_FMT_YUVJ422P = 13;
+    /**
+     * planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+     */
+    public static int AV_PIX_FMT_YUVJ444P = 14;
+    /**
+     * XVideo Motion Acceleration via common packet passing
+     */
+    public static int AV_PIX_FMT_XVMC_MPEG2_MC = 15;
+    public static int AV_PIX_FMT_XVMC_MPEG2_IDCT = 16;
+    /**
+     * packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+     */
+    public static int AV_PIX_FMT_UYVY422 = 17;
+    /**
+     * packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+     */
+    public static int AV_PIX_FMT_UYYVYY411 = 18;
+    /**
+     * packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+     */
+    public static int AV_PIX_FMT_BGR8 = 19;
+    /**
+     * packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed of the 4 msb bits
+     */
+    public static int AV_PIX_FMT_BGR4 = 20;
+    /**
+     * packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+     */
+    public static int AV_PIX_FMT_BGR4_BYTE = 21;
+    /**
+     * packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+     */
+    public static int AV_PIX_FMT_RGB8 = 22;
+    /**
+     * packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb),
+     * a byte contains two pixels, the first pixel in the byte is the one composed of the 4 msb bits
+     */
+    public static int AV_PIX_FMT_RGB4 = 23;
+    /**
+     * packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+     */
+    public static int AV_PIX_FMT_RGB4_BYTE = 24;
+    /**
+     * planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the interleaved UV components (first byte U, following byte V)
+     */
+    public static int AV_PIX_FMT_NV12 = 25;
+    /**
+     * as above, but the U and V bytes are swapped
+     */
+    public static int AV_PIX_FMT_NV21 = 26;
+
+    /**
+     * packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+     */
+    public static int AV_PIX_FMT_ARGB = 27;
+    /**
+     * packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+     */
+    public static int AV_PIX_FMT_RGBA = 28;
+    /**
+     * packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+     */
+    public static int AV_PIX_FMT_ABGR = 29;
+    /**
+     * packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+     */
+    public static int AV_PIX_FMT_BGRA = 30;
+
+    /**
+     * Y, 16bpp, big-endian
+     */
+    public static int AV_PIX_FMT_GRAY16BE = 31;
+    /**
+     * Y, 16bpp, little-endian
+     */
+    public static int AV_PIX_FMT_GRAY16LE = 32;
+    /**
+     * planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+     */
+    public static int AV_PIX_FMT_YUV440P = 33;
+    /**
+     * planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+     */
+    public static int AV_PIX_FMT_YUVJ440P = 34;
+    /**
+     * planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+     */
+    public static int AV_PIX_FMT_YUVA420P = 35;
+    /**
+     * H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct
+     * which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VDPAU_H264 = 36;
+    /**
+     * MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct
+     * which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VDPAU_MPEG1 = 37;
+    /**
+     * MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct
+     * which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VDPAU_MPEG2 = 38;
+    /**
+     * WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VDPAU_WMV3 = 39;
+    /**
+     * VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct
+     * which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VDPAU_VC1 = 40;
+    /**
+     * packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+     */
+    public static int AV_PIX_FMT_RGB48BE = 41;
+    /**
+     * packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+     */
+    public static int AV_PIX_FMT_RGB48LE = 42;
+
+    /**
+     * packed RGB 5:6:5, 16bpp, (msb)5R 6G 5B(lsb), big-endian
+     */
+    public static int AV_PIX_FMT_RGB565BE = 43;
+    /**
+     * packed RGB 5:6:5, 16bpp, (msb)5R 6G 5B(lsb), little-endian
+     */
+    public static int AV_PIX_FMT_RGB565LE = 44;
+    /**
+     * packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, the most significant bit is 0
+     */
+    public static int AV_PIX_FMT_RGB555BE = 45;
+    /**
+     * packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, the most significant bit is 0
+     */
+    public static int AV_PIX_FMT_RGB555LE = 46;
+
+    /**
+     * packed BGR 5:6:5, 16bpp, (msb)5B 6G 5R(lsb), big-endian
+     */
+    public static int AV_PIX_FMT_BGR565BE = 47;
+    /**
+     * packed BGR 5:6:5, 16bpp, (msb)5B 6G 5R(lsb), little-endian
+     */
+    public static int AV_PIX_FMT_BGR565LE = 48;
+    /**
+     * packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, the most significant bit is 1
+     */
+    public static int AV_PIX_FMT_BGR555BE = 49;
+    /**
+     * packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, the most significant bit is 1
+     */
+    public static int AV_PIX_FMT_BGR555LE = 50;
+
+    /**
+     * HW acceleration through VA API at motion compensation entry-point,
+     * Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VAAPI_MOCO = 51;
+    /**
+     * HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VAAPI_IDCT = 52;
+    /**
+     * HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VAAPI_VLD = 53;
+
+    /**
+     * planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+     */
+    public static int AV_PIX_FMT_YUV420P16LE = 54;
+    /**
+     * planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+     */
+    public static int AV_PIX_FMT_YUV420P16BE = 55;
+    /**
+     * planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+     */
+    public static int AV_PIX_FMT_YUV422P16LE = 56;
+    /**
+     * planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+     */
+    public static int AV_PIX_FMT_YUV422P16BE = 57;
+    /**
+     * planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+     */
+    public static int AV_PIX_FMT_YUV444P16LE = 58;
+    /**
+     * planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+     */
+    public static int AV_PIX_FMT_YUV444P16BE = 59;
+    /**
+     * MPEG-4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+     */
+    public static int AV_PIX_FMT_VDPAU_MPEG4 = 60;
+    /**
+     * HW decoding through DXVA2, Picture.data[3] contains an LPDIRECT3DSURFACE9 pointer
+     */
+    public static int AV_PIX_FMT_DXVA2_VLD = 61;
+
+    /**
+     * packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, the most significant bits are 0
+     */
+    public static int AV_PIX_FMT_RGB444LE = 62;
+    /**
+     * packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, the most significant bits are 0
+     */
+    public static int AV_PIX_FMT_RGB444BE = 63;
+    /**
+     * packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, the most significant bits are 1
+     */
+    public static int AV_PIX_FMT_BGR444LE = 64;
+    /**
+     * packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, the most significant bits are 1
+     */
+    public static int AV_PIX_FMT_BGR444BE = 65;
+    /**
+     * 8 bits gray, 8 bits alpha
+     */
+    public static int AV_PIX_FMT_YA8 = 66;
+}
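
These constants mirror the integer values of an older FFmpeg AVPixelFormat enum layout; in this commit they are only consumed by FFmpegFrameRecorder.setPixelFormat in VideoProcessing.videoMerge further down. A short sketch of that use, based on the recorder calls shown later in this diff (output path and frame size are illustrative, imports as in VideoProcessing):

    // AV_PIX_FMT_YUV420P (0) is the format most players expect for MPEG-4/mp4 output.
    FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("D:\\data\\merge.mp4", 720, 1280);
    recorder.setVideoCodec(avcodec.AV_CODEC_ID_MPEG4);
    recorder.setFrameRate(25);
    recorder.setPixelFormat(Avutils.AV_PIX_FMT_YUV420P);
    recorder.start();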

+ 178 - 36
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/CornerCheck.java

@@ -1,4 +1,3 @@
-/*
 package org.jeecg.modules.demo.opencv;
 
 import cn.com.ctop.common.module.utils.ResultMapUtils;
@@ -9,10 +8,9 @@ import org.opencv.imgproc.Imgproc;
 
 import java.util.*;
 
-*/
 /**
  * Corner detection
- *//*
+ */
 
 public class CornerCheck {
     static {
@@ -26,24 +24,6 @@ public class CornerCheck {
         channelSplit();
     }
 
-    public static void channelSplit() {
-        Mat srcImage = Imgcodecs.imread("D:\\data\\235.jpg", Imgcodecs.IMREAD_COLOR);
-        List<Mat> channels = new ArrayList<>();
-        Core.split(srcImage, channels);
-
-        //create mbgr, a list of three Mat channel planes
-        List<Mat> mbgr = new ArrayList<>();
-        Mat hideChannel = new Mat(srcImage.size(), CvType.CV_8UC1, new Scalar(0));
-
-        Mat imageB = new Mat(srcImage.size(), CvType.CV_8UC3);
-        mbgr.add(channels.get(0));
-        mbgr.add(hideChannel);
-        mbgr.add(hideChannel);
-        Core.merge(mbgr, imageB);
-        Imgcodecs.imwrite("D:\\data\\imageB.jpg", imageB);
-
-    }
-
     public static Map<String, Object> detectConers() {
         Map<String, Object> result = new HashMap<>();
         try {
@@ -145,25 +125,56 @@ public class CornerCheck {
         return matCornerList;
     }
 
-    public static List<Point> detectConerList() {
+    public static List<Point> detectConerList(String filePath) {
         List<Point> result = new ArrayList<>();
         try {
             System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-
-            final int maxCorners = 50, blockSize = 3;
-            final double qualityLevel = 0.01, minDistance = 20.0, k = 0.04;
+            Mat src = Imgcodecs.imread(filePath);
+            if (src.empty()) {
+                throw new Exception("no file");
+            }
+            final int maxCorners = 1000, blockSize = 5;
+            final double qualityLevel = 0.05, minDistance = 10.0, k = 0.04;
             final boolean useHarrisDetector = false;
             MatOfPoint corners = new MatOfPoint();
 
-            Mat src = Imgcodecs.imread("D:\\data\\167.jpg");
-            if (src.empty()) {
-                throw new Exception("no file");
+            for (int i = 0; i < src.cols(); i++) {
+                for (int j = 0; j < src.rows(); j++) {
+                    if (i > 160 && i < 470 && j > 250 && j < 750) {
+                        Imgproc.circle(src, new Point(i, j), 1, new Scalar(0, 236, 2), Imgproc.FILLED);
+                    }
+                }
             }
+//            //convolution (sharpen kernel)
+//            Mat kernel = new Mat(3, 3, CvType.CV_16SC1);
+//            kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0);
+//            Imgproc.filter2D(src, src, src.depth(), kernel);
+//            //denoise
+//            Imgproc.GaussianBlur(src, src, new Size(3, 3), 0, 0, 1);
             Mat dst = src.clone();
             Mat gray = new Mat();
+            //convert to grayscale
             Imgproc.cvtColor(src, gray, Imgproc.COLOR_RGB2GRAY);
-            Imgproc.goodFeaturesToTrack(gray, corners, maxCorners, qualityLevel, minDistance,
-                    new Mat(), blockSize, useHarrisDetector, k);
+//            //normalize
+//            Core.normalize(gray,gray);
+//            //Sobel operator
+//            Mat grad_x = new Mat(), grad_y = new Mat();
+//            Mat abs_grad_x = new Mat(), abs_grad_y = new Mat();
+//            int scale = 1;
+//            int delta = 0;
+//            //Gradient X
+//            Imgproc.Sobel(gray, grad_x, CvType.CV_16S, 1, 0, 3, scale, delta, 1);
+//            //Gradient y
+//            Imgproc.Sobel(gray, grad_y, CvType.CV_16S, 0, 1, 3, scale, delta, 1);
+//
+//            Core.convertScaleAbs(grad_x, abs_grad_x);
+//            Core.convertScaleAbs(grad_y, abs_grad_y);
+//
+//            /// Total Gradient (approximate)
+//            Mat grad = new Mat();
+//            Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
+
+            Imgproc.goodFeaturesToTrack(gray, corners, maxCorners, qualityLevel, minDistance, new Mat(), blockSize, useHarrisDetector, k);
             Point[] pCorners = corners.toArray();
             List<Point> pointList = new ArrayList<>();
             double maxX = 0d;
@@ -171,7 +182,8 @@ public class CornerCheck {
             double minX = 10000d;
             double minY = 100000d;
             for (int i = 0; i < pCorners.length; i++) {
-                if (pCorners[i].x > 130 && pCorners[i].x < 570 && pCorners[i].y > 200 && pCorners[i].y < 850) {
+//                Imgproc.circle(dst, pCorners[i], 4, new Scalar(255, 0, 255), Imgproc.FILLED);
+                if (pCorners[i].x > 120 && pCorners[i].x < 555 && pCorners[i].y > 200 && pCorners[i].y < 860) {
                     if (pCorners[i].x >= maxX) {
                         maxX = pCorners[i].x;
                     }
@@ -189,17 +201,62 @@ public class CornerCheck {
                     pointList.add(pCorners[i]);
                 }
             }
+            Imgproc.line(dst, new Point(minX, minY), new Point(minX, maxY), new Scalar(255, 255, 255));
+            Imgproc.line(dst, new Point(minX, maxY), new Point(maxX, maxY), new Scalar(255, 255, 255));
+            Imgproc.line(dst, new Point(maxX, maxY), new Point(maxX, minY), new Scalar(255, 255, 255));
+            Imgproc.line(dst, new Point(maxX, minY), new Point(minX, minY), new Scalar(255, 255, 255));
             List<Point> cornerPoints = new ArrayList<>();
             for (Point point : pointList) {
                 double getX = point.x;
                 double getY = point.y;
-                if ((getY >= (minY + 20) && getY <= (maxY - 20))) {
-                    Imgproc.circle(dst, point, 4, new Scalar(255, 255, 255), Imgproc.FILLED);
+                if ((getY >= (minY + 50) && getY <= (maxY - 50)) || (getX >= (minX + 25) && getX <= (maxX - 70))) {
+                    Imgproc.circle(dst, point, 4, new Scalar(0, 255, 0), Imgproc.FILLED);
                 } else {
+                    Imgproc.circle(dst, point, 4, new Scalar(255, 0, 255), Imgproc.FILLED);
                     cornerPoints.add(point);
-                    Imgproc.circle(dst, point, 4, new Scalar(0, 0, 255), Imgproc.FILLED);
                 }
             }
+            Point[] cornerArray = new Point[cornerPoints.size()];
+
+            for (int i = 0; i < cornerPoints.size(); i++) {
+                cornerArray[i] = cornerPoints.get(i);
+            }
+            /*MatOfPoint2f srcPoint = new MatOfPoint2f(cornerArray);
+            MatOfPoint2f destPoint = new MatOfPoint2f();
+            Imgproc.approxPolyDP(srcPoint,destPoint,100,true);
+            Point []des = destPoint.toArray();
+            for (int i=0;i<des.length-2;i++) {
+                Imgproc.line(dst,des[i],des[i+1],new Scalar(255,255,255));
+            }*/
+            Collections.sort(cornerPoints, new Comparator<Point>() {
+                @Override
+                public int compare(Point p1, Point p2) {
+                    return (int) (p1.x + p1.y - p2.x - p2.y);
+                }
+            });
+            List<Mat> channels = new ArrayList<>();
+            Core.split(src, channels);
+            List<Mat> mbgr = new ArrayList<>();
+            Mat hideChannel = new Mat(src.size(), CvType.CV_8UC1, new Scalar(0));
+            Mat imageGR = new Mat(src.size(), CvType.CV_8UC3);
+            mbgr = new ArrayList<>();
+            mbgr.add(hideChannel);
+            mbgr.add(channels.get(1));
+            mbgr.add(channels.get(2));
+            Core.merge(mbgr, imageGR);
+            List<Point> checkPoints = new ArrayList<>();
+            Point rightDown = cornerPoints.get(cornerPoints.size() - 1);
+            for (Point point : cornerPoints) {
+                System.out.println("cornerPoint:" + point.toString());
+                System.out.println(imageGR.get((int) point.y, (int) point.x)[1]);
+                if (imageGR.get((int) point.y, (int) point.x)[1] >= 120) {
+                    if (point.x <= rightDown.x) {
+                        System.out.println("greenPoint:" + point.toString());
+                        checkPoints.add(point);
+                    }
+                }
+            }
+            cornerPoints = checkPoints;
             Collections.sort(cornerPoints, new Comparator<Point>() {
                 @Override
                 public int compare(Point p1, Point p2) {
@@ -220,12 +277,97 @@ public class CornerCheck {
                 result.add(new Point(cornerPoints.get(1).x - EXPAND_SIZE, cornerPoints.get(1).y + EXPAND_SIZE));
             }
             //bottom-right
-            result.add(new Point(cornerPoints.get(3).x + EXPAND_SIZE, cornerPoints.get(3).y + EXPAND_SIZE));
+            result.add(new Point(cornerPoints.get(cornerPoints.size() - 1).x + EXPAND_SIZE, cornerPoints.get(cornerPoints.size() - 1).y + EXPAND_SIZE));
+
+            for (Point point : result) {
+                Imgproc.circle(dst, point, 10, new Scalar(255, 0, 255), Imgproc.FILLED);
+            }
+            Imgcodecs.imwrite("D:\\data\\allConers.jpg", dst);
             return result;
         } catch (Exception e) {
             e.printStackTrace();
             return result;
         }
     }
+
+    public static void channelSplit() {
+        Mat srcImage = Imgcodecs.imread("D:\\data\\163.jpg", Imgcodecs.IMREAD_COLOR);
+        List<Mat> channels = new ArrayList<>();
+        Core.split(srcImage, channels);
+
+        //create mbgr, a list of three Mat channel planes
+        List<Mat> mbgr = new ArrayList<>();
+        Mat hideChannel = new Mat(srcImage.size(), CvType.CV_8UC1, new Scalar(0));
+
+        Mat imageB = new Mat(srcImage.size(), CvType.CV_8UC3);
+        mbgr.add(channels.get(0));
+        mbgr.add(hideChannel);
+        mbgr.add(hideChannel);
+        Core.merge(mbgr, imageB);
+        List<Point> points = detectConerList("D:\\data\\imageB.jpg");
+        for (Point point : points) {
+            Imgproc.circle(imageB, point, 5, new Scalar(0, 0, 0));
+        }
+        Imgcodecs.imwrite("D:\\data\\imageB.jpg", imageB);
+
+        mbgr = new ArrayList<>();
+        Mat imageG = new Mat(srcImage.size(), CvType.CV_8UC3);
+        mbgr.add(hideChannel);
+        mbgr.add(channels.get(1));
+        mbgr.add(hideChannel);
+        Core.merge(mbgr, imageG);
+        points = detectConerList("D:\\data\\imageG.jpg");
+        for (Point point : points) {
+            Imgproc.circle(imageG, point, 5, new Scalar(0, 0, 0));
+        }
+        Imgcodecs.imwrite("D:\\data\\imageG.jpg", imageG);
+
+        mbgr = new ArrayList<>();
+        Mat imageR = new Mat(srcImage.size(), CvType.CV_8UC3);
+        mbgr.add(hideChannel);
+        mbgr.add(hideChannel);
+        mbgr.add(channels.get(2));
+        Core.merge(mbgr, imageR);
+        points = detectConerList("D:\\data\\imageR.jpg");
+        for (Point point : points) {
+            Imgproc.circle(imageR, point, 5, new Scalar(0, 0, 0));
+        }
+        Imgcodecs.imwrite("D:\\data\\imageR.jpg", imageR);
+
+        mbgr = new ArrayList<>();
+        Mat imageBG = new Mat(srcImage.size(), CvType.CV_8UC3);
+        mbgr.add(channels.get(0));
+        mbgr.add(channels.get(1));
+        mbgr.add(hideChannel);
+        Core.merge(mbgr, imageBG);
+        points = detectConerList("D:\\data\\imageBG.jpg");
+        for (Point point : points) {
+            Imgproc.circle(imageBG, point, 5, new Scalar(0, 0, 0));
+        }
+        Imgcodecs.imwrite("D:\\data\\imageBG.jpg", imageBG);
+
+        Mat imageGR = new Mat(srcImage.size(), CvType.CV_8UC3);
+        mbgr = new ArrayList<>();
+        mbgr.add(hideChannel);
+        mbgr.add(channels.get(1));
+        mbgr.add(channels.get(2));
+        Core.merge(mbgr, imageGR);
+        points = detectConerList("D:\\data\\imageGR.jpg");
+        for (Point point : points) {
+            Imgproc.circle(imageGR, point, 5, new Scalar(0, 0, 0));
+        }
+        Imgcodecs.imwrite("D:\\data\\imageGR.jpg", imageGR);
+
+        mbgr = new ArrayList<>();
+        Mat imageBR = new Mat(srcImage.size(), CvType.CV_8UC3);
+        mbgr.add(channels.get(0));
+        mbgr.add(hideChannel);
+        mbgr.add(channels.get(2));
+        Core.merge(mbgr, imageBR);
+        points = detectConerList("D:\\data\\imageBR.jpg");
+        for (Point point : points) {
+            Imgproc.circle(imageBR, point, 5, new Scalar(0, 0, 0));
+        }
+        Imgcodecs.imwrite("D:\\data\\imageBR.jpg", imageBR);
+    }
 }
-*/
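
detectConerList(filePath) now takes the image path, detects corners inside a fixed region of interest, keeps the ones lying on green pixels, and returns the expanded corner points of the phone-screen region that Perspective.perpectiveImg maps the fill frame onto. A minimal usage sketch, with an illustrative input path:

    package org.jeecg.modules.demo.opencv;

    import org.opencv.core.Point;
    import java.util.List;

    // Hedged sketch: print the expanded corner points detected in one background frame.
    public class CornerCheckDemo {
        public static void main(String[] args) {
            // detectConerList loads the native OpenCV library itself before reading the image
            List<Point> corners = CornerCheck.detectConerList("D:\\data\\163.jpg");
            for (Point p : corners) {
                System.out.println("corner: " + p);
            }
        }
    }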

+ 142 - 0
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/FFmpegUtils.java

@@ -0,0 +1,142 @@
+package org.jeecg.modules.demo.opencv;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.UUID;
+
+public class FFmpegUtils {
+    public static void videoToPcm(String vedioUrl, String m4aUrl, String ffmpegUrl) throws InterruptedException, IOException {
+        List<String> commend = new ArrayList<String>();
+        commend.add(ffmpegUrl);
+        commend.add("-i");
+        commend.add(vedioUrl);
+        commend.add("-vn");
+        commend.add("-y");
+        commend.add("-acodec");
+        commend.add("copy");
+        commend.add(m4aUrl);
+        ProcessBuilder builder = new ProcessBuilder();
+        builder.command(commend);
+        builder.redirectErrorStream(true);
+        Process process = builder.start();
+        process.waitFor();// wait for the process to finish
+    }
+
+    public static void mergeVideodioAndPcm(String videoInputPath, String audioInputPath, String videoOutPath, String ffmpegUrl) throws InterruptedException, IOException {
+        /**
+         *  String command = FFMPEG_PATH + " -i " + videoInputPath + " -i " + audioInputPath
+         *   + " -c:v copy -c:a aac -strict experimental " +
+         *   " -map 0:v:0 -map 1:a:0 "
+         *   + " -y " + videoOutPath;
+         */
+        List<String> commend = new ArrayList<String>();
+        commend.add(ffmpegUrl);
+        commend.add("-i");
+        commend.add(videoInputPath);
+        commend.add("-i");
+        commend.add(audioInputPath);
+        commend.add("-c:v");
+        commend.add("copy");
+        commend.add("-c:a");
+        commend.add("aac");
+        commend.add("-strict");
+        commend.add("experimental");
+        commend.add(videoOutPath);
+        ProcessBuilder builder = new ProcessBuilder();
+        builder.command(commend);
+        builder.redirectErrorStream(true);
+        Process process = builder.start();
+        process.waitFor();// wait for the process to finish
+    }
+
+    public static void main(String[] args) throws InterruptedException, IOException {
+//        videoToPcm("D:\\data\\fill.mp4","D:\\data\\fill.m4a","D:\\java\\ffmpeg\\bin\\ffmpeg.exe");
+//        mergeVideodioAndPcm("D:\\data\\merge.mp4","D:\\data\\fill.m4a","D:\\data\\fill2.mp4","D:\\java\\ffmpeg\\bin\\ffmpeg.exe");
+        try {
+            videoToAudio("D:\\data\\test\\fill.mp4", "D:\\data\\test\\123.mp3");
+            String videoInputPath = "D:\\data\\test\\merge.mp4";
+            String audioInputPath = "D:\\data\\test\\123.mp3";
+            String videoOutPath = "D:\\data\\test\\fill.mp4";
+            convetor(videoInputPath, audioInputPath, videoOutPath);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        System.out.println("---------audio extraction finished-----------");
+    }
+
+    /**
+     * @param videoInputPath full path of the source video
+     * @param audioInputPath full path of the audio file
+     * @param videoOutPath   output path of the video after merging video and audio
+     * @throws Exception
+     */
+    public static void convetor(String videoInputPath, String audioInputPath, String videoOutPath)
+            throws Exception {
+        Process process = null;
+        try {
+            String command = FFMPEG_PATH + " -i " + videoInputPath + " -i " + audioInputPath + " -c:v copy -c:a aac -strict experimental " +
+                    " -map 0:v:0 -map 1:a:0 "
+                    + " -y " + videoOutPath;
+            System.out.println(command);
+            process = Runtime.getRuntime().exec(command);
+            process.waitFor();
+        } catch (IOException e) {
+            // TODO Auto-generated catch block
+            e.printStackTrace();
+        }
+        // launching ffmpeg this way can quickly consume a lot of CPU and memory if its output is not consumed, so drain the error stream here
+        InputStream errorStream = process.getErrorStream();
+        InputStreamReader inputStreamReader = new InputStreamReader(errorStream);
+        BufferedReader br = new BufferedReader(inputStreamReader);
+
+        String line = "";
+        while ((line = br.readLine()) != null) {
+        }
+        if (br != null) {
+            br.close();
+        }
+        if (inputStreamReader != null) {
+            inputStreamReader.close();
+        }
+        if (errorStream != null) {
+            errorStream.close();
+        }
+
+    }
+
+    /**
+     * Extract the audio track from a video file
+     *
+     * @param videoUrl path of the source video
+     * @param accFile  target path for the extracted audio
+     * @return path of the extracted audio file
+     */
+    public static String videoToAudio(String videoUrl, String accFile) {
+        // use the caller-supplied path when given, otherwise generate one under TMP_PATH
+        String aacFile = (accFile != null && !accFile.isEmpty()) ? accFile
+                : TMP_PATH + "/" + new SimpleDateFormat("yyyyMMddHHmmss").format(new Date())
+                    + UUID.randomUUID().toString().replaceAll("-", "") + ".mp3";
+        try {
+            String command = FFMPEG_PATH + " -i " + videoUrl + " -y -vn -acodec copy " + aacFile;
+            System.out.println("video to audio command : " + command);
+            Process process = Runtime.getRuntime().exec(command);
+            process.waitFor();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        return aacFile;
+    }
+
+    /**
+     * Full path to the ffmpeg executable
+     */
+    private static final String FFMPEG_PATH = "D:\\java\\ffmpeg\\bin\\ffmpeg.exe";
+    /**
+     * Directory where extracted audio files are saved
+     */
+    private static final String TMP_PATH = "D:\\data\\test";
+}
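
convetor only starts reading ffmpeg's error stream after waitFor(), so a long transcode whose progress output fills the OS pipe buffer can block the child process. A hedged sketch of a safer pattern (merge stderr into stdout via ProcessBuilder, as videoToPcm already does, and drain it while the process runs); the helper name runFfmpeg is illustrative:

    // Drain ffmpeg's merged output while it runs, then return its exit code.
    private static int runFfmpeg(List<String> command) throws IOException, InterruptedException {
        ProcessBuilder builder = new ProcessBuilder(command);
        builder.redirectErrorStream(true); // merge stderr into stdout
        Process process = builder.start();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
            while (reader.readLine() != null) {
                // discard ffmpeg progress lines so the pipe buffer never fills
            }
        }
        return process.waitFor();
    }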

+ 126 - 0
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/ImageUtils.java

@@ -0,0 +1,126 @@
+package org.jeecg.modules.demo.opencv;
+
+import org.opencv.core.*;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+public class ImageUtils {
+
+    /**
+     * Crop a region of the given size from the centre of the Mat (the crop rect's origin is its top-left corner)
+     *
+     * @param src    source Mat
+     * @param width  width
+     * @param height height
+     * @return the cropped Mat
+     */
+    public static Mat cutFromCenter(Mat src, int width, int height) {
+        if (width > src.width()) {
+            width = src.width();
+        }
+        if (height > src.height()) {
+            height = src.height();
+        }
+        int originX = (src.width() - width) / 2;
+        int originy = (src.height() - height) / 2;
+        // crop rectangle
+        Rect rect = new Rect(originX, originy, width, height);
+        return new Mat(src, rect);
+    }
+
+    public static Mat radialBlur(Mat src) {
+        Mat returnMat = new Mat(src.size(), CvType.CV_32FC3);
+        src.convertTo(returnMat, CvType.CV_32FC3);
+        int width = src.width();
+        int height = src.height();
+        double r;
+        double angle;
+        Point center = new Point(width / 2, height / 2);
+        double t1, t2, t3;
+        int new_x, new_y;
+        int Num = 20;
+        for (int y = 0; y < height; y++) {
+            for (int x = 0; x < width; x++) {
+                t1 = 0;
+                t2 = 0;
+                t3 = 0;
+                r = Math.sqrt((y - center.y) * (y - center.y) + (x - center.x) * (x - center.x));
+                angle = Math.atan2((y - center.y), (x - center.x));
+                for (int mm = 0; mm < Num; mm++) {
+                    double tmR = r - mm > 0 ? r - mm : 0.0;
+                    new_x = (int) (tmR * Math.cos(angle) + center.x);
+                    new_y = (int) (tmR * Math.sin(angle) + center.y);
+                    if (new_x < 0) {
+                        new_x = 0;
+                    }
+                    if (new_x > width - 1) {
+                        new_x = width - 1;
+                    }
+                    if (new_y < 0) {
+                        new_y = 0;
+                    }
+                    if (new_y > height - 1) {
+                        new_y = height - 1;
+                    }
+                    t1 = t1 + src.get(new_y, new_x)[0];
+                    t2 = t2 + src.get(new_y, new_x)[1];
+                    t3 = t3 + src.get(new_y, new_x)[2];
+                }
+                returnMat.put(y, x, t1 / Num, t2 / Num, t3 / Num);
+            }
+        }
+        return returnMat;
+    }
+
+    /**
+     * Concatenate two matrices horizontally (side by side)
+     *
+     * @param matrix1   left matrix
+     * @param matrix2   right matrix
+     * @param matrixCom destination matrix that receives the result
+     * @return the concatenated matrix
+     */
+    public static Mat hconcat(Mat matrix1, Mat matrix2, Mat matrixCom) {
+        matrixCom.create(matrix1.rows(), matrix1.cols() + matrix2.cols(), matrix1.type());
+        Mat temp = matrixCom.colRange(0, matrix1.cols());
+        matrix1.copyTo(temp);
+        Mat temp1 = matrixCom.colRange(matrix1.cols(), matrix1.cols() + matrix2.cols());
+        matrix2.copyTo(temp1);
+        return matrixCom;
+    }
+
+    /**
+     * Concatenate two matrices vertically (stacked)
+     *
+     * @param matrix1   top matrix
+     * @param matrix2   bottom matrix
+     * @param matrixCom destination matrix that receives the result
+     * @return the concatenated matrix
+     */
+    public static Mat vconcat(Mat matrix1, Mat matrix2, Mat matrixCom) {
+        matrixCom.create(matrix1.rows() + matrix2.rows(), matrix1.cols(), matrix1.type());
+        Mat temp = matrixCom.rowRange(0, matrix1.rows());
+        matrix1.copyTo(temp);
+        Mat temp1 = matrixCom.rowRange(matrix1.rows(), matrix1.rows() + matrix2.rows());
+        matrix2.copyTo(temp1);
+        return matrixCom;
+    }
+
+    public static void main(String[] args) {
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+        Mat image = Imgcodecs.imread("D:\\data\\167.jpg", Imgcodecs.IMREAD_COLOR);
+        Mat bigImamge = new Mat();
+        Mat smallImage = new Mat();
+        Mat halfImage = new Mat();
+        Mat wholeImage = new Mat();
+        Imgproc.resize(image, bigImamge, new Size(image.cols() * 1.25, image.rows() * 1.25));
+        Imgproc.resize(image, smallImage, new Size(image.cols() / 2, image.rows() / 2));
+        hconcat(smallImage, smallImage, halfImage);
+        vconcat(halfImage, halfImage, wholeImage);
+        Mat destImage = cutFromCenter(bigImamge, image.width(), image.height());
+        wholeImage = radialBlur(wholeImage);
+        Mat radialBlur = radialBlur(destImage);
+        Imgcodecs.imwrite("D:\\data\\radialBlur.jpg", radialBlur);
+        Imgcodecs.imwrite("D:\\data\\whole.jpg", wholeImage);
+    }
+}
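
radialBlur returns a CV_32FC3 Mat because the per-pixel accumulators are floats. Depending on the OpenCV build, writing a float Mat straight to a .jpg may clip or fail, so converting back to 8-bit first is the safer move. A hedged sketch to drop into a main like the one above, after the native library is loaded (output path illustrative):

    // Convert radialBlur's CV_32FC3 result back to 8-bit before saving as jpg.
    Mat image = Imgcodecs.imread("D:\\data\\167.jpg", Imgcodecs.IMREAD_COLOR);
    Mat blurred = ImageUtils.radialBlur(image);
    Mat blurred8u = new Mat();
    blurred.convertTo(blurred8u, CvType.CV_8UC3);
    Imgcodecs.imwrite("D:\\data\\radialBlur8u.jpg", blurred8u);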

+ 5 - 13
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/Perspective.java

@@ -1,4 +1,3 @@
-/*
 package org.jeecg.modules.demo.opencv;
 
 
@@ -9,16 +8,11 @@ import org.opencv.utils.Converters;
 
 import java.util.List;
 
-*/
 /**
  * Perspective transform
- *//*
+ */
 
 public class Perspective {
-    public static void main(String[] args) {
-
-    }
-
     public static void perpectiveImg(String backgroundImgPath, String phoneImgPath, String targetFileName) {
         try {
             System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
@@ -27,7 +21,7 @@ public class Perspective {
             Mat dst = Imgcodecs.imread(backgroundImgPath);
             Mat dst1 = dst.clone();
             //points used for the perspective transform
-            List<Point> perspectivePoints = CornerCheck.detectConerList();
+            List<Point> perspectivePoints = CornerCheck.detectConerList(backgroundImgPath);
             //rectangle corner points
             List<Point> matPoints = CornerCheck.matCornerList(perspectivePoints);
             //read the image into a Mat
@@ -54,6 +48,7 @@ public class Perspective {
             Imgcodecs.imwrite(targetFileName, dst);
         } catch (Exception e) {
             e.printStackTrace();
+            System.out.println("Exception: " + e);
         }
     }
 
@@ -111,15 +106,13 @@ public class Perspective {
         return (iSum % 2) != 0;
     }
 
-    */
-/**
+    /**
      * Determine whether a point lies inside a polygon; a point on a vertex or an edge also counts as inside and returns true directly
      *
      * @param point   the point to test
      * @param polygon the vertices of the polygon
      * @return true if the point is inside the polygon, false otherwise
-     *//*
-
+     */
     public static boolean isPtInPoly(Point point, List<Point> polygon) {
 
         int N = polygon.size();
@@ -212,4 +205,3 @@ public class Perspective {
         }
     }
 }
-*/

+ 79 - 72
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/Test.java

@@ -1,72 +1,79 @@
-//package org.jeecg.modules.demo.opencv;
-//
-//import org.opencv.core.Core;
-//import org.opencv.core.Mat;
-//import org.opencv.core.Size;
-//import org.opencv.imgcodecs.Imgcodecs;
-//import org.opencv.imgproc.Imgproc;
-//
-//public class Test {
-//    public static void initVideo(){
-//        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
-//        String srcPath = "D:\\data\\images\\";
-//        String fillPath = "D:\\data\\fill\\";
-//        String targetPath = "D:\\data\\video\\";
-//        //1: split the videos into frames
-////        VideoProcessing.grabberVideoFramer("D://data//video.mp4",srcPath);
-////        VideoProcessing.grabberVideoFramer("D://data//fill.mp4",fillPath);
-//        //2: composite the frames
-//        int srcPhoneFrameNumFrom = 163;
-//        int changeSenceFrameNumFrom = 234;
-//        //2.1 copy the unchanged frames of the source video
-////        for(int i=1;i<srcPhoneFrameNumFrom;i++){
-////            Mat srcNoChangeImage = Imgcodecs.imread(srcPath+i+".jpg");
-////            Imgcodecs.imwrite(targetPath+i+".jpg",srcNoChangeImage);
-////        }
-//        String getImageName = "";
-//        //2.2 fill the phone-screen area of each frame
-//        for (int i = 163; i < changeSenceFrameNumFrom; i++) {
-//            String backImg = srcPath + (i) + ".jpg";
-//            String phoneImg = fillPath + (i - 162) + ".jpg";
-//            String targetImg = targetPath + i + ".jpg";
-//            Perspective.perpectiveImg(backImg, phoneImg, targetImg);
-//            getImageName = targetImg;
-//        }
-//        //2.3 transition animation
-//        Mat image = Imgcodecs.imread(getImageName, Imgcodecs.IMREAD_COLOR);
-//        //(1) radial-blur the frame
-//        Mat radialBlur = ImageUtils.radialBlur(image);
-//        Imgcodecs.imwrite(targetPath + changeSenceFrameNumFrom + ".jpg", radialBlur);
-//        changeSenceFrameNumFrom++;
-//        //(2) enlarge, then radial-blur
-//        Mat bigImamge = new Mat();
-//        Imgproc.resize(image, bigImamge, new Size(image.cols() * 1.25, image.rows() * 1.25));
-//        Mat destBigImamge = ImageUtils.cutFromCenter(bigImamge, image.width(), image.height());
-//        Mat radialBlurDestBigImamge = ImageUtils.radialBlur(destBigImamge);
-//        Imgcodecs.imwrite(targetPath + changeSenceFrameNumFrom + ".jpg", radialBlurDestBigImamge);
-//        changeSenceFrameNumFrom++;
-//        //(3) shrink, tile repeatedly, then radial-blur
-//        Mat smallImage = new Mat();
-//        Mat halfImage = new Mat();
-//        Mat wholeImage = new Mat();
-//        Imgproc.resize(image, smallImage, new Size(image.cols() / 2, image.rows() / 2));
-//        ImageUtils.hconcat(smallImage, smallImage, halfImage);
-//        ImageUtils.vconcat(halfImage, halfImage, wholeImage);
-//        wholeImage = ImageUtils.radialBlur(wholeImage);
-//        Imgcodecs.imwrite(targetPath + changeSenceFrameNumFrom + ".jpg", wholeImage);
-//        changeSenceFrameNumFrom++;
-//        //copy the unchanged frames of the fill video
-////        for(int i=changeSenceFrameNumFrom;i<100000;i++){
-////            int checkNum = i-3-srcPhoneFrameNumFrom+1;
-////            String fillImagePath = fillPath+checkNum+".jpg";
-////            File file = new File(fillImagePath);
-////            if(file.exists()){
-////                Mat srcMat = Imgcodecs.imread(fillImagePath);
-////                Imgcodecs.imwrite(targetPath+i+".jpg",srcMat);
-////            }else{
-////                break;
-////            }
-////        }
-//    }
-//
-//}
+package org.jeecg.modules.demo.opencv;
+
+import org.opencv.core.Core;
+import org.opencv.core.Mat;
+import org.opencv.core.Size;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+import java.io.File;
+
+public class Test {
+    public static void initVideo() {
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+        String srcPath = "D:\\data\\images\\";
+        String fillPath = "D:\\data\\fill\\";
+        String targetPath = "D:\\data\\video\\";
+        //1: split the videos into frames
+//        VideoProcessing.grabberVideoFramer("D://data//video.mp4",srcPath);
+//        VideoProcessing.grabberVideoFramer("D://data//fill.mp4",fillPath);
+        //2: composite the frames
+        int srcPhoneFrameNumFrom = 163;
+        int changeSenceFrameNumFrom = 234;
+        //2.1 copy the unchanged frames of the source video
+        for (int i = 1; i < srcPhoneFrameNumFrom; i++) {
+            Mat srcNoChangeImage = Imgcodecs.imread(srcPath + i + ".jpg");
+            Imgcodecs.imwrite(targetPath + i + ".jpg", srcNoChangeImage);
+        }
+        String getImageName = "";
+        //2.2 fill the phone-screen area of each frame
+        for (int i = 163; i < changeSenceFrameNumFrom; i++) {
+            String backImg = srcPath + (i) + ".jpg";
+            String phoneImg = fillPath + (i - 162) + ".jpg";
+            String targetImg = targetPath + i + ".jpg";
+            Perspective.perpectiveImg(backImg, phoneImg, targetImg);
+            getImageName = targetImg;
+        }
+        //2.3 transition animation
+        Mat image = Imgcodecs.imread(getImageName, Imgcodecs.IMREAD_COLOR);
+        //(1) radial-blur the frame
+        Mat radialBlur = ImageUtils.radialBlur(image);
+        Imgcodecs.imwrite(targetPath + changeSenceFrameNumFrom + ".jpg", radialBlur);
+        changeSenceFrameNumFrom++;
+        //(2) enlarge, then radial-blur
+        Mat bigImamge = new Mat();
+        Imgproc.resize(image, bigImamge, new Size(image.cols() * 1.25, image.rows() * 1.25));
+        Mat destBigImamge = ImageUtils.cutFromCenter(bigImamge, image.width(), image.height());
+        Mat radialBlurDestBigImamge = ImageUtils.radialBlur(destBigImamge);
+        Imgcodecs.imwrite(targetPath + changeSenceFrameNumFrom + ".jpg", radialBlurDestBigImamge);
+        changeSenceFrameNumFrom++;
+        //(3) shrink, tile repeatedly, then radial-blur
+        Mat smallImage = new Mat();
+        Mat halfImage = new Mat();
+        Mat wholeImage = new Mat();
+        Imgproc.resize(image, smallImage, new Size(image.cols() / 2, image.rows() / 2));
+        ImageUtils.hconcat(smallImage, smallImage, halfImage);
+        ImageUtils.vconcat(halfImage, halfImage, wholeImage);
+        wholeImage = ImageUtils.radialBlur(wholeImage);
+        Imgcodecs.imwrite(targetPath + changeSenceFrameNumFrom + ".jpg", wholeImage);
+        changeSenceFrameNumFrom++;
+        //copy the unchanged frames of the fill video
+        for (int i = changeSenceFrameNumFrom; i < 100000; i++) {
+            int checkNum = i - 3 - srcPhoneFrameNumFrom + 1;
+            String fillImagePath = fillPath + checkNum + ".jpg";
+            File file = new File(fillImagePath);
+            if (file.exists()) {
+                Mat srcMat = Imgcodecs.imread(fillImagePath);
+                Imgcodecs.imwrite(targetPath + i + ".jpg", srcMat);
+            } else {
+                break;
+            }
+        }
+    }
+
+
+    public static void main(String[] args) {
+        initVideo();
+    }
+
+}

+ 45 - 27
jeecg-boot-module-system/src/main/java/org/jeecg/modules/demo/opencv/VideoProcessing.java

@@ -1,34 +1,31 @@
-/*
 package org.jeecg.modules.demo.opencv;
 
-import org.bytedeco.javacv.FFmpegFrameGrabber;
-import org.bytedeco.javacv.Frame;
-import org.bytedeco.javacv.Java2DFrameConverter;
+import org.bytedeco.javacpp.avcodec;
+import org.bytedeco.javacpp.opencv_core;
+import org.bytedeco.javacv.*;
 
 import javax.imageio.ImageIO;
 import java.awt.image.BufferedImage;
 import java.io.File;
 
-*/
+import static org.bytedeco.javacpp.opencv_imgcodecs.cvLoadImage;
+
 /**
  * Video frame extraction
- *//*
-
+ */
 public class VideoProcessing {
     public static String videoPath = "D:/data";
 
     //directory where extracted frame images are stored
-    public static String videoFramesPath = "D:/data/images/";
+    public static String videoFramesPath = "D:/data/test/";
 
-    */
-/**
+    /**
      * Split the video file into frames and store them in "jpg" format.
      * Relies on FrameToBufferedImage to convert each Frame into a BufferedImage.
-     *
+     * Video frame splitting
      * @param videoFileName
-     *//*
-
-    public static String grabberVideoFramer(String videoFileName) {
+     */
+    public static String grabberVideoFramer(String videoFileName, String filePackageName) {
         //path of the last frame image that was written
         String videPicture = "";
         //Frame object
@@ -36,24 +33,17 @@ public class VideoProcessing {
         //frame counter
         int flag = 0;
         try {
-			 */
-/*
-            获取视频文件
-            *//*
-
+            /*open the video file*/
             FFmpegFrameGrabber fFmpegFrameGrabber = new FFmpegFrameGrabber(videoPath + "/" + videoFileName);
             fFmpegFrameGrabber.start();
 
             //total number of frames in the video
             int ftp = fFmpegFrameGrabber.getLengthInFrames();
+            System.out.println("duration (min) " + ftp / fFmpegFrameGrabber.getFrameRate() / 60);
 
             while (flag <= ftp) {
                 frame = fFmpegFrameGrabber.grabImage();
-				*/
-/*
-				对视频的第五帧进行处理
-				 *//*
-
+                /*process each grabbed frame*/
                 if (frame != null) {
                     //absolute path + file name
                     String fileName = videoFramesPath + String.valueOf(flag) + ".jpg";
@@ -72,6 +62,32 @@ public class VideoProcessing {
         return videPicture;
     }
 
+    public static void videoMerge(String videoFileName, String imagePackage) throws FrameRecorder.Exception {
+        FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(videoFileName, 720, 1280);
+//
+        recorder.setVideoCodec(avcodec.AV_CODEC_ID_MPEG4); // 28
+//		recorder.setFormat("mp4");
+        //	recorder.setFormat("mov,mp4,m4a,3gp,3g2,mj2,h264,ogg,MPEG4");
+        recorder.setFrameRate(25);
+        recorder.setPixelFormat(Avutils.AV_PIX_FMT_YUV422P); // note: this is yuv422p; AV_PIX_FMT_YUV420P would give yuv420p
+        recorder.start();
+        //
+        OpenCVFrameConverter.ToIplImage conveter = new OpenCVFrameConverter.ToIplImage();
+        // list the images in the directory; they are all jpg files named 1.jpg, 2.jpg, ... so they can be iterated in order
+        File file = new File(imagePackage);
+        File[] flist = file.listFiles();
+        // loop over all the images
+        for (int i = 1; i <= flist.length; i++) {
+            String fname = imagePackage + i + ".jpg";
+            opencv_core.IplImage image = cvLoadImage(fname); // very memory-hungry!!
+            recorder.record(conveter.convert(image));
+            // release the memory of the IplImage loaded by cvLoadImage
+            opencv_core.cvReleaseImage(image);
+        }
+        recorder.stop();
+        recorder.release();
+    }
+
     public static BufferedImage FrameToBufferedImage(Frame frame) {
         //创建BufferedImage对象
         Java2DFrameConverter converter = new Java2DFrameConverter();
@@ -80,8 +96,10 @@ public class VideoProcessing {
     }
 
     public static void main(String[] args) {
-        String videoFileName = "video.mp4";
-        grabberVideoFramer(videoFileName);
+        try {
+            videoMerge("D:\\data\\merge.mp4", "D:\\data\\video\\");
+        } catch (FrameRecorder.Exception e) {
+            e.printStackTrace();
+        }
     }
 }
-*/
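
Taken together, the pieces added in this commit form one pipeline: split the background and fill videos into numbered frames (grabberVideoFramer), composite the phone-screen content frame by frame (Test.initVideo), encode the composited frames back into a silent video (videoMerge), then pull the audio track and remux it with FFmpegUtils. A hedged driver sketch with illustrative paths; note that grabberVideoFramer currently still writes to its videoFramesPath constant, so its second argument is indicative only:

    package org.jeecg.modules.demo.opencv;

    public class PipelineDemo {
        public static void main(String[] args) throws Exception {
            // 1. split both videos into numbered jpg frames
            VideoProcessing.grabberVideoFramer("video.mp4", "D:/data/images/");
            VideoProcessing.grabberVideoFramer("fill.mp4", "D:/data/fill/");
            // 2. composite the phone-screen area of each frame
            Test.initVideo();
            // 3. encode the composited frames into a silent video
            VideoProcessing.videoMerge("D:\\data\\merge.mp4", "D:\\data\\video\\");
            // 4. extract the audio and remux it onto the encoded video
            String audio = FFmpegUtils.videoToAudio("D:\\data\\fill.mp4", "D:\\data\\test\\fill.mp3");
            FFmpegUtils.convetor("D:\\data\\merge.mp4", audio, "D:\\data\\final.mp4");
        }
    }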