
VedioTrack

二胡艺
Published on 2017/05/19 18:47

Background differencing

The background is estimated as the per-pixel median of the most recent frames (a 5-frame pool here); moving objects then show up in the absolute difference between the current frame and that background.

%matplotlib inline
import cv2
import numpy as np
from collections import deque

class Detector:
    def __init__(self):
        # pool of the most recent 5 grayscale frames used to build the background
        self.framesPool = deque(maxlen=5)
    def detect(self, frame):
        imgBg = self.calcBackground()
        #cv2.imshow("imbg",imgBg)
        # difference in float to avoid uint8 wrap-around
        imgDiff = np.abs(frame.astype(np.float64) - imgBg)
        return imgDiff
    def calcBackground(self):
        # per-pixel median over the frame pool
        buff = np.stack(self.framesPool).astype(np.float64)
        return np.median(buff, axis=0)
    def update(self, frame):
        self.framesPool.append(frame)
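
The demo loop further down in the post drives Detector2 rather than this class; a minimal sketch of how Detector could be driven on the same clip (highwayII_raw.AVI) might look like the following. Note that the 5-frame pool needs to be filled before the first call to detect:

cap = cv2.VideoCapture("highwayII_raw.AVI")
detector = Detector()
while True:
    success, frame = cap.read()
    if not success:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detector.update(gray)
    # wait until the pool holds its 5 frames, otherwise the median is unreliable
    if len(detector.framesPool) < detector.framesPool.maxlen:
        continue
    diff = detector.detect(gray)
    cv2.imshow("diff", diff.astype(np.uint8))
    if cv2.waitKey(30) & 0xFF == 27:   # Esc quits
        break
cap.release()
cv2.destroyAllWindows()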

Median background modeling

Instead of keeping a frame pool, the background model is nudged towards the running median: each background pixel moves one grey level up where the current frame is brighter and one grey level down where it is darker, so over time it converges to the temporal median.

class Detector2:
    def __init__(self):
        self.imgbg = None
    def detect(self, frame):
        frame = cv2.GaussianBlur(frame, (5, 5), 1)
        # absolute difference without uint8 wrap-around
        diff = cv2.absdiff(frame, self.imgbg)
        # open to remove isolated noise, close to fill holes in moving regions
        elem = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, elem, iterations=2)
        diff = cv2.morphologyEx(diff, cv2.MORPH_CLOSE, elem, iterations=1)
        _, diff = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
        _, contours, _ = cv2.findContours(diff, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        rois = []
        for c in contours:
            rec = cv2.boundingRect(c)
            if rec[2] * rec[3] < 400:   # drop tiny regions
                continue
            rois.append(rec)
        return diff, rois
    def update(self, frame):
        if self.imgbg is None:
            self.imgbg = frame
        else:
            # approximate median: move every background pixel one grey level
            # towards the current frame
            step = np.sign(frame.astype(np.int16) - self.imgbg.astype(np.int16))
            self.imgbg = np.clip(self.imgbg.astype(np.int16) + step, 0, 255).astype(np.uint8)
            cv2.imshow("imgbg", self.imgbg)
            

Gaussian mixture model

Each pixel is modeled by a mixture of K = 5 Gaussians. Components with high weight and low variance are treated as background, and a pixel that does not fall within D = 2.5 standard deviations of such a component is marked as foreground.
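
In the notation of the code below (p[0] = u_k, p[1] = sigma_k², i.e. the variance, p[2] = w_k, x the current pixel value, alpha = 0.01), every component that matches the pixel (|x − u_k| ≤ D·sigma_k) receives the online update

    w_k ← (1 − alpha)·w_k + alpha
    rho = alpha / w_k
    u_k ← (1 − rho)·u_k + rho·x
    sigma_k² ← (1 − rho)·sigma_k² + rho·(x − u_k)²

while unmatched components only decay, w_k ← (1 − alpha)·w_k, and all weights are re-normalized afterwards. If no component matches, the one with the smallest weight is replaced by a new Gaussian centered on x.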

import numpy as np
import cv2

class GMM:
    def __init__(self, frame_size):
        """
        @frame_size - frame shape; only the first two entries (height, width) are used
        """
        self.K = 5            # number of Gaussians per pixel
        self.alpha = 0.01     # learning rate
        self.T = 0.25         # weight threshold for background components
        self.D = 2.5          # match threshold in standard deviations
        self.sd_init = 6      # initial variance of a component

        # params[k, :, r, c] = (u_k, sigma2_k, w_k) for pixel (r, c)
        self.params = np.zeros((self.K, 3, frame_size[0], frame_size[1]))
        self.params[:, 0] = np.random.randint(0, 255, (self.K, frame_size[0], frame_size[1]))
        self.params[:, 1] = self.sd_init
        self.params[:, 2] = 1.0 / self.K

    def detect(self, frame):
        # didactic per-pixel implementation; very slow on full-size frames
        shape = frame.shape
        imgfront = np.zeros_like(frame)
        imgback = np.zeros_like(frame)

        for r in range(shape[0]):
            for c in range(shape[1]):
                param = self.params[:, :, r, c]   # view; rows are (u_k, sigma2_k, w_k)
                match = 0
                pixel = float(frame[r, c])
                for p in param:
                    if np.abs(pixel - p[0]) <= self.D * np.sqrt(p[1]):
                        match = 1
                        p[2] = (1 - self.alpha) * p[2] + self.alpha        # w_k
                        rho = self.alpha / p[2]
                        p[0] = (1 - rho) * p[0] + rho * pixel              # u_k
                        p[1] = (1 - rho) * p[1] + rho * (pixel - p[0])**2  # sigma2_k
                    else:
                        p[2] = (1 - self.alpha) * p[2]

                # re-normalize the weights
                param[:, 2] /= param[:, 2].sum()

                if match == 0:
                    # no component matched: replace the lowest-weight one
                    min_ind = param[:, 2].argmin()
                    param[min_ind, 0] = pixel
                    param[min_ind, 1] = 2 * param[:, 1].max()

                # weighted mean of the component means approximates the background
                imgback[r, c] = (param[:, 0] * param[:, 2]).sum()

                # rank components by w / sigma and test the pixel against them
                rank_ind = np.argsort(-(param[:, 2] / np.sqrt(param[:, 1])))
                for ri in rank_ind:
                    u, s, w = param[ri]
                    if w >= self.T:
                        if np.abs(pixel - u) <= self.D * np.sqrt(s):
                            imgfront[r, c] = 0    # background
                            break
                        else:
                            imgfront[r, c] = 255  # foreground
        return imgfront, imgback
import matplotlib.pyplot as plt
cap = cv2.VideoCapture("highwayII_raw.AVI")
if not cap.isOpened():
    print("open video failed.")
    
"""kf = cv2.KalmanFilter(4,4)
kf.transitionMatrix = np.eye(4)
kf.measurementMatrix = np.eye(4)
kf.processNoiseCov = np.ones((4,4)) * 1e-5
kf.measurementNoiseCov = np.ones((4,4)) * 1e-1
kf.errorCovPost = np.ones((4,4))
kf.statePost = (0,1,4,2)"""

detector = Detector2()
success,frame = cap.read()
src = np.copy(frame)
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
detector.update(frame)
while success:
    im,rois = detector.detect(frame)    
    for roi in rois: 
        #pred = kf.predict()
        #kf.correct(roi)
        cv2.rectangle(src,(roi[0],roi[1]),(roi[0]+ roi[2],roi[1]+roi[3]),(255,0,0),1)
        #cv2.rectangle(src,(pred[0],pred[1]),(pred[0]+ pred[2],pred[1]+pred[3]),(0,255,0),1)
    cv2.imshow("raw",src)
    cv2.imshow("vedio",im)
    cv2.waitKey(30)
    success,frame = cap.read()
    if not success:
        break
    src = np.copy(frame)
    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    detector.update(frame)
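
The commented-out KalmanFilter block above would not run as written: OpenCV expects the matrices as float32 NumPy arrays and statePost as a column vector, not a tuple, and correct() takes a measurement vector. A sketch of one common setup, a constant-velocity filter on the centre of a single detected box, is given below purely as an illustration:

import numpy as np
import cv2

# state = [cx, cy, vx, vy], measurement = [cx, cy]
kf = cv2.KalmanFilter(4, 2)
kf.transitionMatrix = np.array([[1, 0, 1, 0],
                                [0, 1, 0, 1],
                                [0, 0, 1, 0],
                                [0, 0, 0, 1]], np.float32)
kf.measurementMatrix = np.array([[1, 0, 0, 0],
                                 [0, 1, 0, 0]], np.float32)
kf.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-3
kf.measurementNoiseCov = np.eye(2, dtype=np.float32) * 1e-1

# inside the detection loop, for one chosen roi = (x, y, w, h):
# prediction = kf.predict()                                    # predicted [cx, cy, vx, vy]
# measurement = np.array([[x + w / 2.0], [y + h / 2.0]], np.float32)
# kf.correct(measurement)

The same clip can then be pushed through the GMM detector:
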
cap = cv2.VideoCapture("highwayII_raw.AVI")
if not cap.isOpened():
    print("open video failed.")

success,frame = cap.read()
detectorG = GMM(frame.shape)
src = np.copy(frame)
frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
while success:
    im,imback = detectorG.detect(frame)
    cv2.imshow("raw",src)
    cv2.imshow("vedio",im.astype(np.uint8))
    cv2.imshow("back",imback.astype(np.uint8))
    cv2.waitKey(10)
    success,frame = cap.read()
    if not success:
        break
    src = np.copy(frame)
    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
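
For practical use, OpenCV ships an optimized mixture-of-Gaussians background subtractor that avoids the per-pixel Python loop above. A minimal sketch on the same clip (the parameter values here are just examples, not tuned for it):

import cv2

mog2 = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=16, detectShadows=False)
cap = cv2.VideoCapture("highwayII_raw.AVI")
while True:
    success, frame = cap.read()
    if not success:
        break
    mask = mog2.apply(frame)           # 0 = background, 255 = foreground
    cv2.imshow("mog2", mask)
    if cv2.waitKey(30) & 0xFF == 27:   # Esc quits
        break
cap.release()
cv2.destroyAllWindows()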

