Index: content/browser/speech/endpointer/endpointer_unittest.cc
diff --git a/content/browser/speech/endpointer/endpointer_unittest.cc b/content/browser/speech/endpointer/endpointer_unittest.cc
index ec0aac6af545a3ef859a8d9f048eb20e77c7f938..807b6f6828705b2bffc41248eb5174963003ad11 100644
--- a/content/browser/speech/endpointer/endpointer_unittest.cc
+++ b/content/browser/speech/endpointer/endpointer_unittest.cc
@@ -73,9 +73,7 @@ class EnergyEndpointerFrameProcessor : public FrameProcessor {
   explicit EnergyEndpointerFrameProcessor(EnergyEndpointer* endpointer)
       : endpointer_(endpointer) {}
 
-  virtual EpStatus ProcessFrame(int64 time,
-                                int16* samples,
-                                int frame_size) override {
+  EpStatus ProcessFrame(int64 time, int16* samples, int frame_size) override {
     endpointer_->ProcessAudioFrame(time, samples, kFrameSize, NULL);
     int64 ep_time;
     return endpointer_->Status(&ep_time);
@@ -118,9 +116,7 @@ class EndpointerFrameProcessor : public FrameProcessor {
   explicit EndpointerFrameProcessor(Endpointer* endpointer)
       : endpointer_(endpointer) {}
 
-  virtual EpStatus ProcessFrame(int64 time,
-                                int16* samples,
-                                int frame_size) override {
+  EpStatus ProcessFrame(int64 time, int16* samples, int frame_size) override {
     scoped_refptr<AudioChunk> frame(
         new AudioChunk(reinterpret_cast<uint8*>(samples), kFrameSize * 2, 2));
     endpointer_->ProcessAudio(*frame.get(), NULL);