<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[212727] trunk/Source/WebCore</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/212727">212727</a></dd>
<dt>Author</dt> <dd>jer.noble@apple.com</dd>
<dt>Date</dt> <dd>2017-02-21 10:45:10 -0800 (Tue, 21 Feb 2017)</dd>
</dl>

<h3>Log Message</h3>
<pre>Make logging in high-priority audio threads less expensive
https://bugs.webkit.org/show_bug.cgi?id=168639

Reviewed by Jon Lee.

Logging from inside a high-priority audio thread makes a number of calls to malloc, which can block
and therefore cause audio glitches. Make this logging less expensive by dispatching to the main thread
before creating and outputting the log string.

* WebCore.xcodeproj/project.pbxproj:
* platform/audio/mac/AudioSampleDataSource.mm: Renamed from Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp.
(WebCore::AudioSampleDataSource::pushSamplesInternal):
(WebCore::AudioSampleDataSource::pullSamplesInternal):</pre>
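
<h3>Example: deferring log formatting off the audio thread</h3>
<p>The block below is a minimal, hedged sketch of the pattern described above, not the actual WebCore implementation. The real-time thread captures only plain integers, and all of the string formatting happens later on the main queue. The helper name logSamplesPushed and its parameters are illustrative; the sketch assumes an Apple platform with libdispatch and an Objective-C++ translation unit, since passing a C++ lambda to dispatch_async relies on Objective-C++ block conversion (which is also why the source file moves from .cpp to .mm).</p>
<pre>
// Hypothetical helper, shown for illustration only: capture cheap scalar values
// on the audio thread and defer the expensive string formatting to the main queue.
#include &lt;dispatch/dispatch.h&gt;
#include &lt;mach/mach_time.h&gt;
#include &lt;cstdio&gt;

static void logSamplesPushed(size_t sampleCount, int64_t sampleTime)
{
    // Only integers are captured here, so the calling (audio) thread does no
    // allocation or formatting.
    dispatch_async(dispatch_get_main_queue(), [sampleCount, sampleTime, machTime = mach_absolute_time()] {
        // Formatting happens on the main queue, where blocking is acceptable.
        printf("pushSamples: added %zu samples for time = %lld, mach time = %llu\n",
            sampleCount, static_cast&lt;long long&gt;(sampleTime),
            static_cast&lt;unsigned long long&gt;(machTime));
    });
}
</pre>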

<h3>Modified Paths</h3>
<ul>
<li><a href="#trunkSourceWebCoreChangeLog">trunk/Source/WebCore/ChangeLog</a></li>
<li><a href="#trunkSourceWebCoreWebCorexcodeprojprojectpbxproj">trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj</a></li>
</ul>

<h3>Added Paths</h3>
<ul>
<li><a href="#trunkSourceWebCoreplatformaudiomacAudioSampleDataSourcemm">trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm</a></li>
</ul>

<h3>Removed Paths</h3>
<ul>
<li><a href="#trunkSourceWebCoreplatformaudiomacAudioSampleDataSourcecpp">trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="trunkSourceWebCoreChangeLog"></a>
<div class="modfile"><h4>Modified: trunk/Source/WebCore/ChangeLog (212726 => 212727)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/WebCore/ChangeLog        2017-02-21 18:36:00 UTC (rev 212726)
+++ trunk/Source/WebCore/ChangeLog        2017-02-21 18:45:10 UTC (rev 212727)
</span><span class="lines">@@ -1,5 +1,21 @@
</span><span class="cx"> 2017-02-21  Jer Noble  &lt;jer.noble@apple.com&gt;
</span><span class="cx"> 
</span><ins>+        Make logging in high-priority audio threads less expensive
+        https://bugs.webkit.org/show_bug.cgi?id=168639
+
+        Reviewed by Jon Lee.
+
+        Logging from inside a high-priority audio thread makes a number of calls to malloc, which can block
+        and therefore cause audio glitches. Make this logging less expensive by dispatching to the main thread
+        before creating and outputting the log string.
+
+        * WebCore.xcodeproj/project.pbxproj:
+        * platform/audio/mac/AudioSampleDataSource.mm: Renamed from Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp.
+        (WebCore::AudioSampleDataSource::pushSamplesInternal):
+        (WebCore::AudioSampleDataSource::pullSamplesInternal):
+
+2017-02-21  Jer Noble  &lt;jer.noble@apple.com&gt;
+
</ins><span class="cx">         Give the Mock audio input a &quot;hum&quot; to make drop-outs more detectable
</span><span class="cx">         https://bugs.webkit.org/show_bug.cgi?id=168641
</span><span class="cx"> 
</span></span></pre></div>
<a id="trunkSourceWebCoreWebCorexcodeprojprojectpbxproj"></a>
<div class="modfile"><h4>Modified: trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj (212726 => 212727)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj        2017-02-21 18:36:00 UTC (rev 212726)
+++ trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj        2017-02-21 18:45:10 UTC (rev 212727)
</span><span class="lines">@@ -144,7 +144,7 @@
</span><span class="cx">                 07394ECA1BAB2CD700BE99CD /* MediaDevicesRequest.h in Headers */ = {isa = PBXBuildFile; fileRef = 07394EC91BAB2CD700BE99CD /* MediaDevicesRequest.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="cx">                 073B87661E4385AC0071C0EC /* AudioSampleBufferList.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 073B87621E43859D0071C0EC /* AudioSampleBufferList.cpp */; };
</span><span class="cx">                 073B87671E4385AC0071C0EC /* AudioSampleBufferList.h in Headers */ = {isa = PBXBuildFile; fileRef = 073B87631E43859D0071C0EC /* AudioSampleBufferList.h */; };
</span><del>-                073B87681E4385AC0071C0EC /* AudioSampleDataSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 073B87641E43859D0071C0EC /* AudioSampleDataSource.cpp */; };
</del><ins>+                073B87681E4385AC0071C0EC /* AudioSampleDataSource.mm in Sources */ = {isa = PBXBuildFile; fileRef = 073B87641E43859D0071C0EC /* AudioSampleDataSource.mm */; };
</ins><span class="cx">                 073B87691E4385AC0071C0EC /* AudioSampleDataSource.h in Headers */ = {isa = PBXBuildFile; fileRef = 073B87651E43859D0071C0EC /* AudioSampleDataSource.h */; };
</span><span class="cx">                 073BE34017D17E01002BD431 /* JSNavigatorUserMedia.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 073BE33E17D17E01002BD431 /* JSNavigatorUserMedia.cpp */; };
</span><span class="cx">                 073BE34117D17E01002BD431 /* JSNavigatorUserMedia.h in Headers */ = {isa = PBXBuildFile; fileRef = 073BE33F17D17E01002BD431 /* JSNavigatorUserMedia.h */; settings = {ATTRIBUTES = (Private, ); }; };
</span><span class="lines">@@ -7270,7 +7270,7 @@
</span><span class="cx">                 073B87581E40DCFD0071C0EC /* CAAudioStreamDescription.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CAAudioStreamDescription.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 073B87621E43859D0071C0EC /* AudioSampleBufferList.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioSampleBufferList.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 073B87631E43859D0071C0EC /* AudioSampleBufferList.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioSampleBufferList.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><del>-                073B87641E43859D0071C0EC /* AudioSampleDataSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AudioSampleDataSource.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</del><ins>+                073B87641E43859D0071C0EC /* AudioSampleDataSource.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AudioSampleDataSource.mm; sourceTree = &quot;&lt;group&gt;&quot;; };
</ins><span class="cx">                 073B87651E43859D0071C0EC /* AudioSampleDataSource.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioSampleDataSource.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 073BE33E17D17E01002BD431 /* JSNavigatorUserMedia.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSNavigatorUserMedia.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 073BE33F17D17E01002BD431 /* JSNavigatorUserMedia.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSNavigatorUserMedia.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="lines">@@ -25107,7 +25107,7 @@
</span><span class="cx">                                 CD2F4A2618D8A3490063746D /* AudioHardwareListenerMac.h */,
</span><span class="cx">                                 073B87621E43859D0071C0EC /* AudioSampleBufferList.cpp */,
</span><span class="cx">                                 073B87631E43859D0071C0EC /* AudioSampleBufferList.h */,
</span><del>-                                073B87641E43859D0071C0EC /* AudioSampleDataSource.cpp */,
</del><ins>+                                073B87641E43859D0071C0EC /* AudioSampleDataSource.mm */,
</ins><span class="cx">                                 073B87651E43859D0071C0EC /* AudioSampleDataSource.h */,
</span><span class="cx">                                 CD54DE4917469C6D005E5B36 /* AudioSessionMac.cpp */,
</span><span class="cx">                                 073B87571E40DCFD0071C0EC /* CAAudioStreamDescription.cpp */,
</span><span class="lines">@@ -29327,7 +29327,7 @@
</span><span class="cx">                                 FD31608512B026F700C1A359 /* AudioResampler.cpp in Sources */,
</span><span class="cx">                                 FD31608712B026F700C1A359 /* AudioResamplerKernel.cpp in Sources */,
</span><span class="cx">                                 073B87661E4385AC0071C0EC /* AudioSampleBufferList.cpp in Sources */,
</span><del>-                                073B87681E4385AC0071C0EC /* AudioSampleDataSource.cpp in Sources */,
</del><ins>+                                073B87681E4385AC0071C0EC /* AudioSampleDataSource.mm in Sources */,
</ins><span class="cx">                                 FD8C46EB154608E700A5910C /* AudioScheduledSourceNode.cpp in Sources */,
</span><span class="cx">                                 CDA79824170A258300D45C55 /* AudioSession.cpp in Sources */,
</span><span class="cx">                                 CDA79827170A279100D45C55 /* AudioSessionIOS.mm in Sources */,
</span></span></pre></div>
<a id="trunkSourceWebCoreplatformaudiomacAudioSampleDataSourcecpp"></a>
<div class="delfile"><h4>Deleted: trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp (212726 => 212727)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp        2017-02-21 18:36:00 UTC (rev 212726)
+++ trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp        2017-02-21 18:45:10 UTC (rev 212727)
</span><span class="lines">@@ -1,355 +0,0 @@
</span><del>-/*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include &quot;config.h&quot;
-#include &quot;AudioSampleDataSource.h&quot;
-
-#if ENABLE(MEDIA_STREAM)
-
-#include &quot;CAAudioStreamDescription.h&quot;
-#include &quot;CARingBuffer.h&quot;
-#include &quot;Logging.h&quot;
-#include &quot;MediaTimeAVFoundation.h&quot;
-#include &lt;AudioToolbox/AudioConverter.h&gt;
-#include &lt;mach/mach.h&gt;
-#include &lt;mach/mach_time.h&gt;
-#include &lt;mutex&gt;
-#include &lt;syslog.h&gt;
-#include &lt;wtf/CurrentTime.h&gt;
-#include &lt;wtf/StringPrintStream.h&gt;
-
-#include &quot;CoreMediaSoftLink.h&quot;
-
-namespace WebCore {
-
-using namespace JSC;
-
-Ref&lt;AudioSampleDataSource&gt; AudioSampleDataSource::create(size_t maximumSampleCount)
-{
-    return adoptRef(*new AudioSampleDataSource(maximumSampleCount));
-}
-
-AudioSampleDataSource::AudioSampleDataSource(size_t maximumSampleCount)
-    : m_inputSampleOffset(MediaTime::invalidTime())
-    , m_maximumSampleCount(maximumSampleCount)
-{
-}
-
-AudioSampleDataSource::~AudioSampleDataSource()
-{
-    m_inputDescription = nullptr;
-    m_outputDescription = nullptr;
-    m_ringBuffer = nullptr;
-    if (m_converter) {
-        AudioConverterDispose(m_converter);
-        m_converter = nullptr;
-    }
-}
-
-void AudioSampleDataSource::setPaused(bool paused)
-{
-    std::lock_guard&lt;Lock&gt; lock(m_lock);
-
-    if (paused == m_paused)
-        return;
-
-    m_transitioningFromPaused = m_paused;
-    m_paused = paused;
-}
-
-OSStatus AudioSampleDataSource::setupConverter()
-{
-    ASSERT(m_inputDescription &amp;&amp; m_outputDescription);
-
-    if (m_converter) {
-        AudioConverterDispose(m_converter);
-        m_converter = nullptr;
-    }
-
-    if (*m_inputDescription == *m_outputDescription)
-        return 0;
-
-    OSStatus err = AudioConverterNew(&amp;m_inputDescription-&gt;streamDescription(), &amp;m_outputDescription-&gt;streamDescription(), &amp;m_converter);
-    if (err)
-        LOG_ERROR(&quot;AudioSampleDataSource::setupConverter(%p) - AudioConverterNew returned error %d (%.4s)&quot;, this, (int)err, (char*)&amp;err);
-
-    return err;
-
-}
-
-OSStatus AudioSampleDataSource::setInputFormat(const CAAudioStreamDescription&amp; format)
-{
-    ASSERT(format.sampleRate() &gt;= 0);
-
-    m_inputDescription = std::make_unique&lt;CAAudioStreamDescription&gt;(format);
-    if (m_outputDescription)
-        return setupConverter();
-
-    return 0;
-}
-
-OSStatus AudioSampleDataSource::setOutputFormat(const CAAudioStreamDescription&amp; format)
-{
-    ASSERT(m_inputDescription);
-    ASSERT(format.sampleRate() &gt;= 0);
-
-    m_outputDescription = std::make_unique&lt;CAAudioStreamDescription&gt;(format);
-    if (!m_ringBuffer)
-        m_ringBuffer = std::make_unique&lt;CARingBuffer&gt;();
-
-    m_ringBuffer-&gt;allocate(format, static_cast&lt;size_t&gt;(m_maximumSampleCount));
-    m_scratchBuffer = AudioSampleBufferList::create(m_outputDescription-&gt;streamDescription(), m_maximumSampleCount);
-
-    return setupConverter();
-}
-
-MediaTime AudioSampleDataSource::hostTime() const
-{
-    // Based on listing #2 from Apple Technical Q&amp;A QA1398, modified to be thread-safe.
-    static double frequency;
-    static mach_timebase_info_data_t timebaseInfo;
-    static std::once_flag initializeTimerOnceFlag;
-    std::call_once(initializeTimerOnceFlag, [] {
-        kern_return_t kr = mach_timebase_info(&amp;timebaseInfo);
-        frequency = 1e-9 * static_cast&lt;double&gt;(timebaseInfo.numer) / static_cast&lt;double&gt;(timebaseInfo.denom);
-        ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
-        ASSERT(timebaseInfo.denom);
-    });
-
-    return MediaTime::createWithDouble(mach_absolute_time() * frequency);
-}
-
-void AudioSampleDataSource::pushSamplesInternal(const AudioBufferList&amp; bufferList, const MediaTime&amp; presentationTime, size_t sampleCount)
-{
-    ASSERT(m_lock.isHeld());
-
-    const AudioBufferList* sampleBufferList;
-    if (m_converter) {
-        m_scratchBuffer-&gt;reset();
-        OSStatus err = m_scratchBuffer-&gt;copyFrom(bufferList, m_converter);
-        if (err)
-            return;
-
-        sampleBufferList = m_scratchBuffer-&gt;bufferList().list();
-    } else
-        sampleBufferList = &amp;bufferList;
-
-    MediaTime sampleTime = presentationTime;
-    if (m_inputSampleOffset == MediaTime::invalidTime()) {
-        m_inputSampleOffset = MediaTime(1 - sampleTime.timeValue(), sampleTime.timeScale());
-        if (m_inputSampleOffset.timeScale() != sampleTime.timeScale()) {
-            // FIXME: It should be possible to do this without calling CMTimeConvertScale.
-            m_inputSampleOffset = toMediaTime(CMTimeConvertScale(toCMTime(m_inputSampleOffset), sampleTime.timeScale(), kCMTimeRoundingMethod_Default));
-        }
-        LOG(MediaCaptureSamples, &quot;@@ pushSamples: input sample offset is %lld, m_maximumSampleCount = %zu&quot;, m_inputSampleOffset.timeValue(), m_maximumSampleCount);
-    }
-    sampleTime += m_inputSampleOffset;
-
-#if !LOG_DISABLED
-    uint64_t startFrame1 = 0;
-    uint64_t endFrame1 = 0;
-    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame1, endFrame1);
-#endif
-
-    m_ringBuffer-&gt;store(sampleBufferList, sampleCount, sampleTime.timeValue());
-    m_timeStamp = sampleTime.timeValue();
-
-    LOG(MediaCaptureSamples, &quot;@@ pushSamples: added %ld samples for time = %s (was %s), mach time = %lld&quot;, sampleCount, toString(sampleTime).utf8().data(), toString(presentationTime).utf8().data(), mach_absolute_time());
-
-#if !LOG_DISABLED
-    uint64_t startFrame2 = 0;
-    uint64_t endFrame2 = 0;
-    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame2, endFrame2);
-    LOG(MediaCaptureSamples, &quot;@@ pushSamples: buffered range was [%lld .. %lld], is [%lld .. %lld]&quot;, startFrame1, endFrame1, startFrame2, endFrame2);
-#endif
-}
-
-void AudioSampleDataSource::pushSamples(const AudioStreamBasicDescription&amp; sampleDescription, CMSampleBufferRef sampleBuffer)
-{
-    std::lock_guard&lt;Lock&gt; lock(m_lock);
-
-    ASSERT_UNUSED(sampleDescription, *m_inputDescription == sampleDescription);
-    ASSERT(m_ringBuffer);
-    
-    WebAudioBufferList list(*m_inputDescription, sampleBuffer);
-    pushSamplesInternal(list, toMediaTime(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)), CMSampleBufferGetNumSamples(sampleBuffer));
-}
-
-void AudioSampleDataSource::pushSamples(const MediaTime&amp; sampleTime, const PlatformAudioData&amp; audioData, size_t sampleCount)
-{
-    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
-    ASSERT(is&lt;WebAudioBufferList&gt;(audioData));
-    pushSamplesInternal(*downcast&lt;WebAudioBufferList&gt;(audioData).list(), sampleTime, sampleCount);
-}
-
-bool AudioSampleDataSource::pullSamplesInternal(AudioBufferList&amp; buffer, size_t&amp; sampleCount, uint64_t timeStamp, double /*hostTime*/, PullMode mode)
-{
-    ASSERT(m_lock.isHeld());
-    size_t byteCount = sampleCount * m_outputDescription-&gt;bytesPerFrame();
-
-    ASSERT(buffer.mNumberBuffers == m_ringBuffer-&gt;channelCount());
-    if (buffer.mNumberBuffers != m_ringBuffer-&gt;channelCount()) {
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        sampleCount = 0;
-        return false;
-    }
-
-    if (!m_ringBuffer || m_muted || m_inputSampleOffset == MediaTime::invalidTime()) {
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        sampleCount = 0;
-        return false;
-    }
-
-    uint64_t startFrame = 0;
-    uint64_t endFrame = 0;
-    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame, endFrame);
-
-    if (m_transitioningFromPaused) {
-        uint64_t buffered = endFrame - m_timeStamp;
-        if (buffered &lt; sampleCount * 2) {
-            AudioSampleBufferList::zeroABL(buffer, byteCount);
-            sampleCount = 0;
-            return false;
-        }
-
-        const double twentyMS = .02;
-        const double tenMS = .01;
-        const double fiveMS = .005;
-        double sampleRate = m_outputDescription-&gt;sampleRate();
-        m_outputSampleOffset = timeStamp + m_timeStamp;
-        if (buffered &gt; sampleRate * twentyMS)
-            m_outputSampleOffset -= sampleRate * twentyMS;
-        else if (buffered &gt; sampleRate * tenMS)
-            m_outputSampleOffset -= sampleRate * tenMS;
-        else if (buffered &gt; sampleRate * fiveMS)
-            m_outputSampleOffset -= sampleRate * fiveMS;
-
-        m_transitioningFromPaused = false;
-    }
-
-    timeStamp += m_outputSampleOffset;
-
-    LOG(MediaCaptureSamples, &quot;** pullSamples: asking for %ld samples at time = %lld (was %lld)&quot;, sampleCount, timeStamp, timeStamp - m_outputSampleOffset);
-
-    uint64_t framesAvailable = sampleCount;
-    if (timeStamp &lt; startFrame || timeStamp + sampleCount &gt; endFrame) {
-        if (timeStamp + sampleCount &lt; startFrame || timeStamp &gt; endFrame)
-            framesAvailable = 0;
-        else if (timeStamp &lt; startFrame)
-            framesAvailable = timeStamp + sampleCount - startFrame;
-        else
-            framesAvailable = timeStamp + sampleCount - endFrame;
-
-        LOG(MediaCaptureSamples, &quot;** pullSamplesInternal: sample %lld is not completely in range [%lld .. %lld], returning %lld frames&quot;, timeStamp, startFrame, endFrame, framesAvailable);
-
-        if (framesAvailable &lt; sampleCount) {
-            const double twentyMS = .02;
-            double sampleRate = m_outputDescription-&gt;sampleRate();
-            auto delta = static_cast&lt;int64_t&gt;(timeStamp) - endFrame;
-            if (delta &gt; 0 &amp;&amp; delta &lt; sampleRate * twentyMS)
-                m_outputSampleOffset -= delta;
-        }
-
-        if (!framesAvailable) {
-            AudioSampleBufferList::zeroABL(buffer, byteCount);
-            return false;
-        }
-    }
-
-    if (m_volume &gt;= .95) {
-        m_ringBuffer-&gt;fetch(&amp;buffer, sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix);
-        return true;
-    }
-
-    if (m_scratchBuffer-&gt;copyFrom(*m_ringBuffer.get(), sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix)) {
-        AudioSampleBufferList::zeroABL(buffer, sampleCount);
-        return false;
-    }
-
-    m_scratchBuffer-&gt;applyGain(m_volume);
-    if (m_scratchBuffer-&gt;copyTo(buffer, sampleCount))
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-
-    return true;
-}
-
-bool AudioSampleDataSource::pullAvalaibleSamplesAsChunks(AudioBufferList&amp; buffer, size_t sampleCountPerChunk, uint64_t timeStamp, Function&lt;void()&gt;&amp;&amp; consumeFilledBuffer)
-{
-    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
-    if (!lock.owns_lock() || !m_ringBuffer)
-        return false;
-
-    ASSERT(buffer.mNumberBuffers == m_ringBuffer-&gt;channelCount());
-    if (buffer.mNumberBuffers != m_ringBuffer-&gt;channelCount())
-        return false;
-
-    uint64_t startFrame = 0;
-    uint64_t endFrame = 0;
-    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame, endFrame);
-    if (timeStamp &lt; startFrame)
-        return false;
-
-    startFrame = timeStamp;
-    while (endFrame - startFrame &gt;= sampleCountPerChunk) {
-        if (m_ringBuffer-&gt;fetch(&amp;buffer, sampleCountPerChunk, startFrame, CARingBuffer::Copy))
-            return false;
-        consumeFilledBuffer();
-        startFrame += sampleCountPerChunk;
-    }
-    return true;
-}
-
-bool AudioSampleDataSource::pullSamples(AudioBufferList&amp; buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
-{
-    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
-    if (!lock.owns_lock() || !m_ringBuffer) {
-        size_t byteCount = sampleCount * m_outputDescription-&gt;bytesPerFrame();
-        AudioSampleBufferList::zeroABL(buffer, byteCount);
-        return false;
-    }
-
-    return pullSamplesInternal(buffer, sampleCount, timeStamp, hostTime, mode);
-}
-
-bool AudioSampleDataSource::pullSamples(AudioSampleBufferList&amp; buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
-{
-    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
-    if (!lock.owns_lock() || !m_ringBuffer) {
-        buffer.zero();
-        return false;
-    }
-
-    if (!pullSamplesInternal(buffer.bufferList(), sampleCount, timeStamp, hostTime, mode))
-        return false;
-
-    buffer.setTimes(timeStamp, hostTime);
-    buffer.setSampleCount(sampleCount);
-
-    return true;
-}
-
-} // namespace WebCore
-
-#endif // ENABLE(MEDIA_STREAM)
</del></span></pre></div>
<a id="trunkSourceWebCoreplatformaudiomacAudioSampleDataSourcemmfromrev212726trunkSourceWebCoreplatformaudiomacAudioSampleDataSourcecpp"></a>
<div class="copfile"><h4>Copied: trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm (from rev 212726, trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.cpp) (0 => 212727)</h4>
<pre class="diff"><span>
<span class="info">--- trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm                                (rev 0)
+++ trunk/Source/WebCore/platform/audio/mac/AudioSampleDataSource.mm        2017-02-21 18:45:10 UTC (rev 212727)
</span><span class="lines">@@ -0,0 +1,365 @@
</span><ins>+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include &quot;config.h&quot;
+#include &quot;AudioSampleDataSource.h&quot;
+
+#if ENABLE(MEDIA_STREAM)
+
+#include &quot;CAAudioStreamDescription.h&quot;
+#include &quot;CARingBuffer.h&quot;
+#include &quot;Logging.h&quot;
+#include &quot;MediaTimeAVFoundation.h&quot;
+#include &lt;AudioToolbox/AudioConverter.h&gt;
+#include &lt;mach/mach.h&gt;
+#include &lt;mach/mach_time.h&gt;
+#include &lt;mutex&gt;
+#include &lt;syslog.h&gt;
+#include &lt;wtf/CurrentTime.h&gt;
+#include &lt;wtf/StringPrintStream.h&gt;
+
+#include &quot;CoreMediaSoftLink.h&quot;
+
+namespace WebCore {
+
+using namespace JSC;
+
+Ref&lt;AudioSampleDataSource&gt; AudioSampleDataSource::create(size_t maximumSampleCount)
+{
+    return adoptRef(*new AudioSampleDataSource(maximumSampleCount));
+}
+
+AudioSampleDataSource::AudioSampleDataSource(size_t maximumSampleCount)
+    : m_inputSampleOffset(MediaTime::invalidTime())
+    , m_maximumSampleCount(maximumSampleCount)
+{
+}
+
+AudioSampleDataSource::~AudioSampleDataSource()
+{
+    m_inputDescription = nullptr;
+    m_outputDescription = nullptr;
+    m_ringBuffer = nullptr;
+    if (m_converter) {
+        AudioConverterDispose(m_converter);
+        m_converter = nullptr;
+    }
+}
+
+void AudioSampleDataSource::setPaused(bool paused)
+{
+    std::lock_guard&lt;Lock&gt; lock(m_lock);
+
+    if (paused == m_paused)
+        return;
+
+    m_transitioningFromPaused = m_paused;
+    m_paused = paused;
+}
+
+OSStatus AudioSampleDataSource::setupConverter()
+{
+    ASSERT(m_inputDescription &amp;&amp; m_outputDescription);
+
+    if (m_converter) {
+        AudioConverterDispose(m_converter);
+        m_converter = nullptr;
+    }
+
+    if (*m_inputDescription == *m_outputDescription)
+        return 0;
+
+    OSStatus err = AudioConverterNew(&amp;m_inputDescription-&gt;streamDescription(), &amp;m_outputDescription-&gt;streamDescription(), &amp;m_converter);
+    if (err)
+        LOG_ERROR(&quot;AudioSampleDataSource::setupConverter(%p) - AudioConverterNew returned error %d (%.4s)&quot;, this, (int)err, (char*)&amp;err);
+
+    return err;
+
+}
+
+OSStatus AudioSampleDataSource::setInputFormat(const CAAudioStreamDescription&amp; format)
+{
+    ASSERT(format.sampleRate() &gt;= 0);
+
+    m_inputDescription = std::make_unique&lt;CAAudioStreamDescription&gt;(format);
+    if (m_outputDescription)
+        return setupConverter();
+
+    return 0;
+}
+
+OSStatus AudioSampleDataSource::setOutputFormat(const CAAudioStreamDescription&amp; format)
+{
+    ASSERT(m_inputDescription);
+    ASSERT(format.sampleRate() &gt;= 0);
+
+    m_outputDescription = std::make_unique&lt;CAAudioStreamDescription&gt;(format);
+    if (!m_ringBuffer)
+        m_ringBuffer = std::make_unique&lt;CARingBuffer&gt;();
+
+    m_ringBuffer-&gt;allocate(format, static_cast&lt;size_t&gt;(m_maximumSampleCount));
+    m_scratchBuffer = AudioSampleBufferList::create(m_outputDescription-&gt;streamDescription(), m_maximumSampleCount);
+
+    return setupConverter();
+}
+
+MediaTime AudioSampleDataSource::hostTime() const
+{
+    // Based on listing #2 from Apple Technical Q&amp;A QA1398, modified to be thread-safe.
+    static double frequency;
+    static mach_timebase_info_data_t timebaseInfo;
+    static std::once_flag initializeTimerOnceFlag;
+    std::call_once(initializeTimerOnceFlag, [] {
+        kern_return_t kr = mach_timebase_info(&amp;timebaseInfo);
+        frequency = 1e-9 * static_cast&lt;double&gt;(timebaseInfo.numer) / static_cast&lt;double&gt;(timebaseInfo.denom);
+        ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
+        ASSERT(timebaseInfo.denom);
+    });
+
+    return MediaTime::createWithDouble(mach_absolute_time() * frequency);
+}
+
+void AudioSampleDataSource::pushSamplesInternal(const AudioBufferList&amp; bufferList, const MediaTime&amp; presentationTime, size_t sampleCount)
+{
+    ASSERT(m_lock.isHeld());
+
+    const AudioBufferList* sampleBufferList;
+    if (m_converter) {
+        m_scratchBuffer-&gt;reset();
+        OSStatus err = m_scratchBuffer-&gt;copyFrom(bufferList, m_converter);
+        if (err)
+            return;
+
+        sampleBufferList = m_scratchBuffer-&gt;bufferList().list();
+    } else
+        sampleBufferList = &amp;bufferList;
+
+    MediaTime sampleTime = presentationTime;
+    if (m_inputSampleOffset == MediaTime::invalidTime()) {
+        m_inputSampleOffset = MediaTime(1 - sampleTime.timeValue(), sampleTime.timeScale());
+        if (m_inputSampleOffset.timeScale() != sampleTime.timeScale()) {
+            // FIXME: It should be possible to do this without calling CMTimeConvertScale.
+            m_inputSampleOffset = toMediaTime(CMTimeConvertScale(toCMTime(m_inputSampleOffset), sampleTime.timeScale(), kCMTimeRoundingMethod_Default));
+        }
+        LOG(MediaCaptureSamples, &quot;@@ pushSamples: input sample offset is %lld, m_maximumSampleCount = %zu&quot;, m_inputSampleOffset.timeValue(), m_maximumSampleCount);
+    }
+    sampleTime += m_inputSampleOffset;
+
+#if !LOG_DISABLED
+    uint64_t startFrame1 = 0;
+    uint64_t endFrame1 = 0;
+    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame1, endFrame1);
+#endif
+
+    m_ringBuffer-&gt;store(sampleBufferList, sampleCount, sampleTime.timeValue());
+    m_timeStamp = sampleTime.timeValue();
+
+
+#if !LOG_DISABLED
+    uint64_t startFrame2 = 0;
+    uint64_t endFrame2 = 0;
+    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame2, endFrame2);
+    dispatch_async(dispatch_get_main_queue(), [sampleCount, sampleTime, presentationTime, absoluteTime = mach_absolute_time(), startFrame1, endFrame1, startFrame2, endFrame2] {
+        LOG(MediaCaptureSamples, &quot;@@ pushSamples: added %ld samples for time = %s (was %s), mach time = %lld&quot;, sampleCount, toString(sampleTime).utf8().data(), toString(presentationTime).utf8().data(), absoluteTime);
+        LOG(MediaCaptureSamples, &quot;@@ pushSamples: buffered range was [%lld .. %lld], is [%lld .. %lld]&quot;, startFrame1, endFrame1, startFrame2, endFrame2);
+    });
+#endif
+}
+
+void AudioSampleDataSource::pushSamples(const AudioStreamBasicDescription&amp; sampleDescription, CMSampleBufferRef sampleBuffer)
+{
+    std::lock_guard&lt;Lock&gt; lock(m_lock);
+
+    ASSERT_UNUSED(sampleDescription, *m_inputDescription == sampleDescription);
+    ASSERT(m_ringBuffer);
+    
+    WebAudioBufferList list(*m_inputDescription, sampleBuffer);
+    pushSamplesInternal(list, toMediaTime(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)), CMSampleBufferGetNumSamples(sampleBuffer));
+}
+
+void AudioSampleDataSource::pushSamples(const MediaTime&amp; sampleTime, const PlatformAudioData&amp; audioData, size_t sampleCount)
+{
+    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
+    ASSERT(is&lt;WebAudioBufferList&gt;(audioData));
+    pushSamplesInternal(*downcast&lt;WebAudioBufferList&gt;(audioData).list(), sampleTime, sampleCount);
+}
+
+bool AudioSampleDataSource::pullSamplesInternal(AudioBufferList&amp; buffer, size_t&amp; sampleCount, uint64_t timeStamp, double /*hostTime*/, PullMode mode)
+{
+    ASSERT(m_lock.isHeld());
+    size_t byteCount = sampleCount * m_outputDescription-&gt;bytesPerFrame();
+
+    ASSERT(buffer.mNumberBuffers == m_ringBuffer-&gt;channelCount());
+    if (buffer.mNumberBuffers != m_ringBuffer-&gt;channelCount()) {
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        sampleCount = 0;
+        return false;
+    }
+
+    if (!m_ringBuffer || m_muted || m_inputSampleOffset == MediaTime::invalidTime()) {
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        sampleCount = 0;
+        return false;
+    }
+
+    uint64_t startFrame = 0;
+    uint64_t endFrame = 0;
+    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame, endFrame);
+
+    if (m_transitioningFromPaused) {
+        uint64_t buffered = endFrame - m_timeStamp;
+        if (buffered &lt; sampleCount * 2) {
+            AudioSampleBufferList::zeroABL(buffer, byteCount);
+            sampleCount = 0;
+            return false;
+        }
+
+        const double twentyMS = .02;
+        const double tenMS = .01;
+        const double fiveMS = .005;
+        double sampleRate = m_outputDescription-&gt;sampleRate();
+        m_outputSampleOffset = timeStamp + m_timeStamp;
+        if (buffered &gt; sampleRate * twentyMS)
+            m_outputSampleOffset -= sampleRate * twentyMS;
+        else if (buffered &gt; sampleRate * tenMS)
+            m_outputSampleOffset -= sampleRate * tenMS;
+        else if (buffered &gt; sampleRate * fiveMS)
+            m_outputSampleOffset -= sampleRate * fiveMS;
+
+        m_transitioningFromPaused = false;
+    }
+
+    timeStamp += m_outputSampleOffset;
+
+#if !LOG_DISABLED
+    dispatch_async(dispatch_get_main_queue(), [sampleCount, timeStamp, sampleOffset = m_outputSampleOffset] {
+        LOG(MediaCaptureSamples, &quot;** pullSamples: asking for %ld samples at time = %lld (was %lld)&quot;, sampleCount, timeStamp, timeStamp - sampleOffset);
+    });
+#endif
+
+    uint64_t framesAvailable = sampleCount;
+    if (timeStamp &lt; startFrame || timeStamp + sampleCount &gt; endFrame) {
+        if (timeStamp + sampleCount &lt; startFrame || timeStamp &gt; endFrame)
+            framesAvailable = 0;
+        else if (timeStamp &lt; startFrame)
+            framesAvailable = timeStamp + sampleCount - startFrame;
+        else
+            framesAvailable = timeStamp + sampleCount - endFrame;
+
+#if !LOG_DISABLED
+        dispatch_async(dispatch_get_main_queue(), [timeStamp, startFrame, endFrame, framesAvailable] {
+            LOG(MediaCaptureSamples, &quot;** pullSamplesInternal: sample %lld is not completely in range [%lld .. %lld], returning %lld frames&quot;, timeStamp, startFrame, endFrame, framesAvailable);
+        });
+#endif
+
+        if (framesAvailable &lt; sampleCount) {
+            const double twentyMS = .02;
+            double sampleRate = m_outputDescription-&gt;sampleRate();
+            auto delta = static_cast&lt;int64_t&gt;(timeStamp) - endFrame;
+            if (delta &gt; 0 &amp;&amp; delta &lt; sampleRate * twentyMS)
+                m_outputSampleOffset -= delta;
+        }
+
+        if (!framesAvailable) {
+            AudioSampleBufferList::zeroABL(buffer, byteCount);
+            return false;
+        }
+    }
+
+    if (m_volume &gt;= .95) {
+        m_ringBuffer-&gt;fetch(&amp;buffer, sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix);
+        return true;
+    }
+
+    if (m_scratchBuffer-&gt;copyFrom(*m_ringBuffer.get(), sampleCount, timeStamp, mode == Copy ? CARingBuffer::Copy : CARingBuffer::Mix)) {
+        AudioSampleBufferList::zeroABL(buffer, sampleCount);
+        return false;
+    }
+
+    m_scratchBuffer-&gt;applyGain(m_volume);
+    if (m_scratchBuffer-&gt;copyTo(buffer, sampleCount))
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+
+    return true;
+}
+
+bool AudioSampleDataSource::pullAvalaibleSamplesAsChunks(AudioBufferList&amp; buffer, size_t sampleCountPerChunk, uint64_t timeStamp, Function&lt;void()&gt;&amp;&amp; consumeFilledBuffer)
+{
+    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
+    if (!lock.owns_lock() || !m_ringBuffer)
+        return false;
+
+    ASSERT(buffer.mNumberBuffers == m_ringBuffer-&gt;channelCount());
+    if (buffer.mNumberBuffers != m_ringBuffer-&gt;channelCount())
+        return false;
+
+    uint64_t startFrame = 0;
+    uint64_t endFrame = 0;
+    m_ringBuffer-&gt;getCurrentFrameBounds(startFrame, endFrame);
+    if (timeStamp &lt; startFrame)
+        return false;
+
+    startFrame = timeStamp;
+    while (endFrame - startFrame &gt;= sampleCountPerChunk) {
+        if (m_ringBuffer-&gt;fetch(&amp;buffer, sampleCountPerChunk, startFrame, CARingBuffer::Copy))
+            return false;
+        consumeFilledBuffer();
+        startFrame += sampleCountPerChunk;
+    }
+    return true;
+}
+
+bool AudioSampleDataSource::pullSamples(AudioBufferList&amp; buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
+{
+    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
+    if (!lock.owns_lock() || !m_ringBuffer) {
+        size_t byteCount = sampleCount * m_outputDescription-&gt;bytesPerFrame();
+        AudioSampleBufferList::zeroABL(buffer, byteCount);
+        return false;
+    }
+
+    return pullSamplesInternal(buffer, sampleCount, timeStamp, hostTime, mode);
+}
+
+bool AudioSampleDataSource::pullSamples(AudioSampleBufferList&amp; buffer, size_t sampleCount, uint64_t timeStamp, double hostTime, PullMode mode)
+{
+    std::unique_lock&lt;Lock&gt; lock(m_lock, std::try_to_lock);
+    if (!lock.owns_lock() || !m_ringBuffer) {
+        buffer.zero();
+        return false;
+    }
+
+    if (!pullSamplesInternal(buffer.bufferList(), sampleCount, timeStamp, hostTime, mode))
+        return false;
+
+    buffer.setTimes(timeStamp, hostTime);
+    buffer.setSampleCount(sampleCount);
+
+    return true;
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(MEDIA_STREAM)
</ins></span></pre>
</div>
</div>

</body>
</html>