Introduction
The Python `FrameSamplingFilter` examples below are extracted from popular open-source projects; refer to them for typical usage.
Programming language: Python
Namespace/package name: pliers.filters
Example #1
File: test_video_filters.py
Project: tyarkoni/featureX
def test_frame_sampling_video_filter():
    """Sampling every 3rd frame keeps ceil(n/3) frames, stretches each
    frame's duration accordingly, and preserves onset and total duration."""
    path = join(VIDEO_DIR, 'small.mp4')
    video = VideoStim(path, onset=4.2)
    assert video.fps == 30
    assert video.n_frames in (167, 168)
    assert video.width == 560

    # Test frame filters
    sampler = FrameSamplingFilter(every=3)
    sampled = sampler.transform(video)
    assert sampled.n_frames == math.ceil(video.n_frames / 3.0)
    assert sampled.duration == video.duration

    frames = list(sampled)
    first, second = frames[0], frames[1]
    assert type(first) == VideoFrameStim
    assert first.name == 'frame[0]'
    assert first.onset == 4.2
    # Each kept frame covers 3 source frames at 30 fps.
    assert first.duration == 3 * (1 / 30.0)
    assert second.onset == 4.3

    with pytest.raises(TypeError):
        sampled.get_frame(onset=1.0)

    # Commented out because no longer allowing sampling filter chaining
    # conv = FrameSamplingFilter(hertz=15)
    # derived = conv.transform(derived)
    # assert derived.n_frames == math.ceil(video.n_frames / 6.0)
    # first = next(f for f in derived)
    # assert type(first) == VideoFrameStim
    # assert first.duration == 3 * (1 / 15.0)
    # second = [f for f in derived][1]
    # assert second.onset == 4.4

    # Re-sampling an already-sampled stim must raise.
    with pytest.raises(TypeError):
        sampler.transform(sampled)
Example #2
File: test_google_extractors.py
Project: tyarkoni/featureX
def test_google_vision_face_batch():
    """Batched face extraction with the Google Vision API, over images and
    over frame-sampled videos (including one that yields no face rows)."""
    names = ['apple', 'obama', 'thai_people']
    paths = [join(get_test_data_path(), 'image', '%s.jpg' % name)
             for name in names]
    images = [ImageStim(p) for p in paths]
    ext = GoogleVisionAPIFaceExtractor(batch_size=5)
    merged = merge_results(ext.transform(images), format='wide',
                           extractor_names=False,
                           handle_annotations='first')
    # Only the two face-bearing images contribute rows.
    assert merged.shape == (2, 139)
    assert 'joyLikelihood' in merged.columns
    assert merged['joyLikelihood'][0] == 'VERY_LIKELY'
    assert merged['joyLikelihood'][1] == 'VERY_LIKELY'

    sampler = FrameSamplingFilter(every=10)

    # Video with faces: every 10th frame gives 22 annotated rows.
    speech = sampler.transform(VideoStim(join(VIDEO_DIR, 'obama_speech.mp4')))
    merged = merge_results(ext.transform(speech), format='wide',
                           extractor_names=False)
    assert 'joyLikelihood' in merged.columns
    assert merged.shape == (22, 139)

    # Video with no faces: the merged result is empty.
    faceless = sampler.transform(VideoStim(join(VIDEO_DIR, 'small.mp4')))
    merged = merge_results(ext.transform(faceless), format='wide',
                           extractor_names=False)
    assert 'joyLikelihood' not in merged.columns
    assert len(merged) == 0
Example #3
File: test_video_filters.py
Project: tyarkoni/featureX
def test_frame_sampling_cv2():
    """FrameSamplingFilter(top_n=5) yields exactly 5 VideoFrameStims
    (skipped when OpenCV is unavailable)."""
    pytest.importorskip('cv2')
    video = VideoStim(join(VIDEO_DIR, 'small.mp4'))
    sampled = FrameSamplingFilter(top_n=5).transform(video)
    assert sampled.n_frames == 5
    assert type(next(iter(sampled))) == VideoFrameStim
Example #4
File: test_video_filters.py
Project: tyarkoni/featureX
def test_frame_sampling_video_filter2():
    """Verify the source-frame indices selected by 'every' and 'hertz'
    sampling, including up-sampling and a non-integer fps."""
    path = join(VIDEO_DIR, 'obama_speech.mp4')
    video = VideoStim(path, onset=4.2)
    assert video.fps == 12
    assert video.n_frames == 105

    # Test frame indices
    # every=3 keeps frames 0, 3, 6, ... -> 35 frames; 5th kept is frame 12.
    sampled = FrameSamplingFilter(every=3).transform(video)
    assert sampled.n_frames == 35
    assert sampled.frame_index[4] == 12

    # hertz=3 at 12 fps keeps every 4th frame: 0, 4, 8, 12, ...
    sampled = FrameSamplingFilter(hertz=3).transform(video)
    assert sampled.n_frames == 27
    assert sampled.frame_index[3] == 12

    # hertz=24 up-samples: twice as many frames as the source.
    sampled = FrameSamplingFilter(hertz=24).transform(video)
    assert sampled.n_frames == 210
    assert sampled.frame_index[4] == 2

    # Non-integer fps: at hertz=1 indices advance by ~11.8 per sample.
    video.fps = 11.8
    sampled = FrameSamplingFilter(hertz=1).transform(video)
    assert sampled.n_frames == 9
    assert sampled.frame_index[4] == 47
    assert sampled.frame_index[5] == 59
Example #5
File: test_utils.py
Project: tyarkoni/featureX
def test_progress_bar(capfd):
    """The 'progress_bar' config option toggles the 'Video frame:' progress
    output written to stderr while FrameSamplingFilter iterates frames.

    Fix: restore the global config option in a ``finally`` block so a
    failing assertion cannot leak the modified option into other tests.
    """
    video_dir = join(get_test_data_path(), 'video')
    video = VideoStim(join(video_dir, 'obama_speech.mp4'))
    conv = FrameSamplingFilter(hertz=2)
    old_val = config.get_option('progress_bar')
    try:
        # Enabled: a progress bar reaching 100% should appear on stderr.
        config.set_option('progress_bar', True)
        conv.transform(video)
        out, err = capfd.readouterr()
        assert 'Video frame:' in err and '100%' in err
        # Disabled: no progress output at all.
        config.set_option('progress_bar', False)
        conv.transform(video)
        out, err = capfd.readouterr()
        assert 'Video frame:' not in err and '100%' not in err
    finally:
        # Always restore the option, even if an assertion above failed.
        config.set_option('progress_bar', old_val)