<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>[70589] branches/audio/WebCore</title>
</head>
<body>

<style type="text/css"><!--
#msg dl.meta { border: 1px #006 solid; background: #369; padding: 6px; color: #fff; }
#msg dl.meta dt { float: left; width: 6em; font-weight: bold; }
#msg dt:after { content:':';}
#msg dl, #msg dt, #msg ul, #msg li, #header, #footer, #logmsg { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt;  }
#msg dl a { font-weight: bold}
#msg dl a:link    { color:#fc3; }
#msg dl a:active  { color:#ff0; }
#msg dl a:visited { color:#cc6; }
h3 { font-family: verdana,arial,helvetica,sans-serif; font-size: 10pt; font-weight: bold; }
#msg pre { overflow: auto; background: #ffc; border: 1px #fa0 solid; padding: 6px; }
#logmsg { background: #ffc; border: 1px #fa0 solid; padding: 1em 1em 0 1em; }
#logmsg p, #logmsg pre, #logmsg blockquote { margin: 0 0 1em 0; }
#logmsg p, #logmsg li, #logmsg dt, #logmsg dd { line-height: 14pt; }
#logmsg h1, #logmsg h2, #logmsg h3, #logmsg h4, #logmsg h5, #logmsg h6 { margin: .5em 0; }
#logmsg h1:first-child, #logmsg h2:first-child, #logmsg h3:first-child, #logmsg h4:first-child, #logmsg h5:first-child, #logmsg h6:first-child { margin-top: 0; }
#logmsg ul, #logmsg ol { padding: 0; list-style-position: inside; margin: 0 0 0 1em; }
#logmsg ul { text-indent: -1em; padding-left: 1em; }
#logmsg ol { text-indent: -1.5em; padding-left: 1.5em; }
#logmsg > ul, #logmsg > ol { margin: 0 0 1em 0; }
#logmsg pre { background: #eee; padding: 1em; }
#logmsg blockquote { border: 1px solid #fa0; border-left-width: 10px; padding: 1em 1em 0 1em; background: white;}
#logmsg dl { margin: 0; }
#logmsg dt { font-weight: bold; }
#logmsg dd { margin: 0; padding: 0 0 0.5em 0; }
#logmsg dd:before { content:'\00bb';}
#logmsg table { border-spacing: 0px; border-collapse: collapse; border-top: 4px solid #fa0; border-bottom: 1px solid #fa0; background: #fff; }
#logmsg table th { text-align: left; font-weight: normal; padding: 0.2em 0.5em; border-top: 1px dotted #fa0; }
#logmsg table td { text-align: right; border-top: 1px dotted #fa0; padding: 0.2em 0.5em; }
#logmsg table thead th { text-align: center; border-bottom: 1px solid #fa0; }
#logmsg table th.Corner { text-align: left; }
#logmsg hr { border: none 0; border-top: 2px dashed #fa0; height: 1px; }
#header, #footer { color: #fff; background: #636; border: 1px #300 solid; padding: 6px; }
#patch { width: 100%; }
#patch h4 {font-family: verdana,arial,helvetica,sans-serif;font-size:10pt;padding:8px;background:#369;color:#fff;margin:0;}
#patch .propset h4, #patch .binary h4 {margin:0;}
#patch pre {padding:0;line-height:1.2em;margin:0;}
#patch .diff {width:100%;background:#eee;padding: 0 0 10px 0;overflow:auto;}
#patch .propset .diff, #patch .binary .diff  {padding:10px 0;}
#patch span {display:block;padding:0 10px;}
#patch .modfile, #patch .addfile, #patch .delfile, #patch .propset, #patch .binary, #patch .copfile {border:1px solid #ccc;margin:10px 0;}
#patch ins {background:#dfd;text-decoration:none;display:block;padding:0 10px;}
#patch del {background:#fdd;text-decoration:none;display:block;padding:0 10px;}
#patch .lines, .info {color:#888;background:#fff;}
--></style>
<div id="msg">
<dl class="meta">
<dt>Revision</dt> <dd><a href="http://trac.webkit.org/projects/webkit/changeset/70589">70589</a></dd>
<dt>Author</dt> <dd>crogers@google.com</dd>
<dt>Date</dt> <dd>2010-10-26 16:45:21 -0700 (Tue, 26 Oct 2010)</dd>
</dl>

<h3>Log Message</h3>
<pre>audio branch: changes in preparation for landing in trunk
* Add thread safety to ConvolverNode.
* Throw an exception for invalid arguments in connect() / disconnect().
* Clean up JavaScriptAudioNode, AudioResampler, RealtimeAnalyser, RealtimeAnalyserNode, and AudioBufferSourceNode.
* Add AudioBusMac, implementing the loadPlatformResource() method, and switch HRTFElevation to use it.
* Add comments about the Chromium resource abstraction layers.
* Add a custom markChildren() method to JavaScriptAudioNode (fixes a bug where the &quot;onaudioprocess&quot; handler was an anonymous function).
* Minor cleanup in the JS and V8 custom bindings.</pre>
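
<h3>Illustrative Note: Audio-Thread Locking Pattern</h3>
<p>For readers unfamiliar with the pattern, the following is a minimal, standalone sketch of the non-blocking lock discipline that the ConvolverNode and AudioBufferSourceNode changes in the diff below rely on: the real-time audio thread only ever try-locks and falls back to rendering silence, while the main thread locks normally around a pointer swap. This is not WebCore code; ConvolverSketch, Reverb, and setImpulseResponse() are placeholder names, and std::mutex / std::unique_ptr stand in for the WTF Mutex / OwnPtr types used in the actual change. The expensive construction of the new impulse response is done outside the lock so the audio thread is locked out only for the brief pointer swap.</p>
<pre>// Standalone sketch (not part of this commit) of the locking pattern:
// the audio thread must never block, so it uses try_lock() and outputs
// silence when the main thread currently holds the lock.

#include &lt;algorithm&gt;
#include &lt;cstddef&gt;
#include &lt;memory&gt;
#include &lt;mutex&gt;
#include &lt;utility&gt;

struct Reverb {
    void process(const float* input, float* output, std::size_t frames)
    {
        for (std::size_t i = 0; i &lt; frames; ++i)
            output[i] = input[i]; // placeholder for the real convolution
    }
};

class ConvolverSketch {
public:
    // Audio thread: called once per render quantum.
    void process(const float* input, float* output, std::size_t frames)
    {
        if (m_processLock.try_lock()) {
            if (m_reverb)
                m_reverb-&gt;process(input, output, frames);
            else
                std::fill(output, output + frames, 0.0f);
            m_processLock.unlock();
        } else {
            // The main thread is swapping the impulse response; output silence.
            std::fill(output, output + frames, 0.0f);
        }
    }

    // Main thread: install a new impulse response.
    // The Reverb is built by the caller outside the lock; only the pointer
    // swap is guarded, so the audio thread is excluded only briefly.
    void setImpulseResponse(std::unique_ptr&lt;Reverb&gt; reverb)
    {
        std::lock_guard&lt;std::mutex&gt; locker(m_processLock);
        m_reverb = std::move(reverb);
    }

private:
    std::mutex m_processLock;
    std::unique_ptr&lt;Reverb&gt; m_reverb;
};</pre>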

<h3>Modified Paths</h3>
<ul>
<li><a href="#branchesaudioWebCoreWebCorexcodeprojprojectpbxproj">branches/audio/WebCore/WebCore.xcodeproj/project.pbxproj</a></li>
<li><a href="#branchesaudioWebCoreplatformaudioAudioBush">branches/audio/WebCore/platform/audio/AudioBus.h</a></li>
<li><a href="#branchesaudioWebCoreplatformaudioAudioResamplercpp">branches/audio/WebCore/platform/audio/AudioResampler.cpp</a></li>
<li><a href="#branchesaudioWebCoreplatformaudioAudioResamplerh">branches/audio/WebCore/platform/audio/AudioResampler.h</a></li>
<li><a href="#branchesaudioWebCoreplatformaudioAudioResamplerKernelh">branches/audio/WebCore/platform/audio/AudioResamplerKernel.h</a></li>
<li><a href="#branchesaudioWebCoreplatformaudioAudioResourcesh">branches/audio/WebCore/platform/audio/AudioResources.h</a></li>
<li><a href="#branchesaudioWebCoreplatformaudioHRTFElevationcpp">branches/audio/WebCore/platform/audio/HRTFElevation.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioAudioBufferSourceNodecpp">branches/audio/WebCore/webaudio/AudioBufferSourceNode.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioAudioBufferSourceNodeh">branches/audio/WebCore/webaudio/AudioBufferSourceNode.h</a></li>
<li><a href="#branchesaudioWebCorewebaudioAudioNodeidl">branches/audio/WebCore/webaudio/AudioNode.idl</a></li>
<li><a href="#branchesaudioWebCorewebaudioConvolverNodecpp">branches/audio/WebCore/webaudio/ConvolverNode.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioConvolverNodeh">branches/audio/WebCore/webaudio/ConvolverNode.h</a></li>
<li><a href="#branchesaudioWebCorewebaudioConvolverNodeidl">branches/audio/WebCore/webaudio/ConvolverNode.idl</a></li>
<li><a href="#branchesaudioWebCorewebaudioJSAudioContextCustomcpp">branches/audio/WebCore/webaudio/JSAudioContextCustom.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioJSAudioMiscCustomcpp">branches/audio/WebCore/webaudio/JSAudioMiscCustom.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioJSAudioNodeCustomcpp">branches/audio/WebCore/webaudio/JSAudioNodeCustom.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioJSCachedAudioCustomcpp">branches/audio/WebCore/webaudio/JSCachedAudioCustom.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioJavaScriptAudioNodecpp">branches/audio/WebCore/webaudio/JavaScriptAudioNode.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioJavaScriptAudioNodeh">branches/audio/WebCore/webaudio/JavaScriptAudioNode.h</a></li>
<li><a href="#branchesaudioWebCorewebaudioJavaScriptAudioNodeidl">branches/audio/WebCore/webaudio/JavaScriptAudioNode.idl</a></li>
<li><a href="#branchesaudioWebCorewebaudioRealtimeAnalysercpp">branches/audio/WebCore/webaudio/RealtimeAnalyser.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioRealtimeAnalyserh">branches/audio/WebCore/webaudio/RealtimeAnalyser.h</a></li>
<li><a href="#branchesaudioWebCorewebaudioRealtimeAnalyserNodecpp">branches/audio/WebCore/webaudio/RealtimeAnalyserNode.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioRealtimeAnalyserNodeh">branches/audio/WebCore/webaudio/RealtimeAnalyserNode.h</a></li>
<li><a href="#branchesaudioWebCorewebaudioRealtimeAnalyserNodeidl">branches/audio/WebCore/webaudio/RealtimeAnalyserNode.idl</a></li>
<li><a href="#branchesaudioWebCorewebaudioV8AudioContextCustomcpp">branches/audio/WebCore/webaudio/V8AudioContextCustom.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioV8AudioMiscCustomcpp">branches/audio/WebCore/webaudio/V8AudioMiscCustom.cpp</a></li>
<li><a href="#branchesaudioWebCorewebaudioV8AudioNodeCustomcpp">branches/audio/WebCore/webaudio/V8AudioNodeCustom.cpp</a></li>
</ul>

<h3>Added Paths</h3>
<ul>
<li><a href="#branchesaudioWebCoreplatformaudiomacAudioBusMacmm">branches/audio/WebCore/platform/audio/mac/AudioBusMac.mm</a></li>
</ul>

</div>
<div id="patch">
<h3>Diff</h3>
<a id="branchesaudioWebCoreWebCorexcodeprojprojectpbxproj"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/WebCore.xcodeproj/project.pbxproj (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/WebCore.xcodeproj/project.pbxproj        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/WebCore.xcodeproj/project.pbxproj        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -5733,6 +5733,7 @@
</span><span class="cx">                 FDCB2FA4125676C000A28935 /* AudioUtilities.h in Headers */ = {isa = PBXBuildFile; fileRef = FDCB2FA2125676C000A28935 /* AudioUtilities.h */; };
</span><span class="cx">                 FDD5C48B11B5ACD900190F9A /* JSRealtimeAnalyserNode.h in Headers */ = {isa = PBXBuildFile; fileRef = FDD5C48911B5ACD900190F9A /* JSRealtimeAnalyserNode.h */; };
</span><span class="cx">                 FDD5C48C11B5ACD900190F9A /* JSRealtimeAnalyserNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FDD5C48A11B5ACD900190F9A /* JSRealtimeAnalyserNode.cpp */; };
</span><ins>+                FDD60F9612761630005B9276 /* AudioBusMac.mm in Sources */ = {isa = PBXBuildFile; fileRef = FDD60F9512761630005B9276 /* AudioBusMac.mm */; };
</ins><span class="cx">                 FDE8B83511FF89550080E93F /* DelayNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FDE8B83111FF89550080E93F /* DelayNode.cpp */; };
</span><span class="cx">                 FDE8B83611FF89550080E93F /* DelayNode.h in Headers */ = {isa = PBXBuildFile; fileRef = FDE8B83211FF89550080E93F /* DelayNode.h */; };
</span><span class="cx">                 FDE8B83711FF89550080E93F /* DelayProcessor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FDE8B83311FF89550080E93F /* DelayProcessor.cpp */; };
</span><span class="lines">@@ -11976,6 +11977,7 @@
</span><span class="cx">                 FDCB2FA2125676C000A28935 /* AudioUtilities.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AudioUtilities.h; path = webaudio/audio/AudioUtilities.h; sourceTree = SOURCE_ROOT; };
</span><span class="cx">                 FDD5C48911B5ACD900190F9A /* JSRealtimeAnalyserNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSRealtimeAnalyserNode.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 FDD5C48A11B5ACD900190F9A /* JSRealtimeAnalyserNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSRealtimeAnalyserNode.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><ins>+                FDD60F9512761630005B9276 /* AudioBusMac.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = AudioBusMac.mm; path = webaudio/audio/mac/AudioBusMac.mm; sourceTree = SOURCE_ROOT; };
</ins><span class="cx">                 FDE8B83111FF89550080E93F /* DelayNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DelayNode.cpp; path = webaudio/DelayNode.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 FDE8B83211FF89550080E93F /* DelayNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DelayNode.h; path = webaudio/DelayNode.h; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="cx">                 FDE8B83311FF89550080E93F /* DelayProcessor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DelayProcessor.cpp; path = webaudio/DelayProcessor.cpp; sourceTree = &quot;&lt;group&gt;&quot;; };
</span><span class="lines">@@ -18823,6 +18825,7 @@
</span><span class="cx">                 FD5842991188BD7000349EE1 /* mac */ = {
</span><span class="cx">                         isa = PBXGroup;
</span><span class="cx">                         children = (
</span><ins>+                                FDD60F9512761630005B9276 /* AudioBusMac.mm */,
</ins><span class="cx">                                 FD4520F71225CDAC002F5136 /* AudioFileReaderMac.cpp */,
</span><span class="cx">                                 FD58429D1188BD7A00349EE1 /* AudioFileReaderMac.h */,
</span><span class="cx">                                 FD58429E1188BD7A00349EE1 /* AudioDestinationMac.cpp */,
</span><span class="lines">@@ -22124,7 +22127,7 @@
</span><span class="cx">                         );
</span><span class="cx">                         runOnlyForDeploymentPostprocessing = 0;
</span><span class="cx">                         shellPath = /bin/sh;
</span><del>-                        shellScript = &quot;# Create a symlink in the built WebCore.framework/Resources to point to audio spatialization files\n# which exist in WebKit/WebCore/audio/AudioSpatialization\n# This is a temporary hack so that people can build WebKit in my private \&quot;audio\&quot; branch and then\n# run the \&quot;run-safari\&quot; script to test out the new audio features.\n\nWEBCORE_RESOURCES_DIR=$BUILT_PRODUCTS_DIR\&quot;/WebCore.framework/Resources\&quot;\nAUDIO_DIR=$BUILD_DIR\&quot;/../WebCore/webaudio\&quot;\n\nrm -f $WEBCORE_RESOURCES_DIR\&quot;/AudioSpatialization\&quot;\nln -s $AUDIO_DIR\&quot;/AudioSpatialization\&quot; $WEBCORE_RESOURCES_DIR\&quot;/AudioSpatialization\&quot;\n&quot;;
</del><ins>+                        shellScript = &quot;# Create a symlink in the built WebCore.framework/Resources to point to audio spatialization files\n# which exist in WebKit/WebCore/audio/AudioSpatialization\n# This is a temporary hack so that people can build WebKit in my private \&quot;audio\&quot; branch and then\n# run the \&quot;run-safari\&quot; script to test out the new audio features.\n\nWEBCORE_RESOURCES_DIR=$BUILT_PRODUCTS_DIR\&quot;/WebCore.framework/Resources\&quot;\nAUDIO_DIR=$BUILD_DIR\&quot;/../WebCore/webaudio\&quot;\n\nrm -f $WEBCORE_RESOURCES_DIR\&quot;/AudioSpatialization\&quot;\nln -s $AUDIO_DIR\&quot;/AudioSpatialization\&quot; $WEBCORE_RESOURCES_DIR\&quot;/AudioSpatialization\&quot;\n\n# symlink for the newer AudioBus::loadPlatformResource()\nln -s $AUDIO_DIR\&quot;/AudioSpatialization/IRC_Composite\&quot; $WEBCORE_RESOURCES_DIR\&quot;/audio\&quot;\n\necho \&quot;XYZENABLE_3D_CANVAS = \&quot;\necho $ENABLE_3D_CANVAS\n&quot;;
</ins><span class="cx">                         showEnvVarsInLog = 0;
</span><span class="cx">                 };
</span><span class="cx"> /* End PBXShellScriptBuildPhase section */
</span><span class="lines">@@ -24483,6 +24486,7 @@
</span><span class="cx">                                 FD06CF131252B30400CD1C68 /* DelayDSPKernel.cpp in Sources */,
</span><span class="cx">                                 FDCB2FA3125676C000A28935 /* AudioUtilities.cpp in Sources */,
</span><span class="cx">                                 FD347FC6126517DB00117C59 /* AudioResamplerKernel.cpp in Sources */,
</span><ins>+                                FDD60F9612761630005B9276 /* AudioBusMac.mm in Sources */,
</ins><span class="cx">                         );
</span><span class="cx">                         runOnlyForDeploymentPostprocessing = 0;
</span><span class="cx">                 };
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudioAudioBush"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/platform/audio/AudioBus.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/AudioBus.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/platform/audio/AudioBus.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -117,6 +117,8 @@
</span><span class="cx">     // Makes maximum absolute value == 1.0 (if possible).
</span><span class="cx">     void normalize();
</span><span class="cx"> 
</span><ins>+    static PassOwnPtr&lt;AudioBus&gt; loadPlatformResource(const char* name, double sampleRate);
+
</ins><span class="cx"> protected:
</span><span class="cx">     AudioBus() { };
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudioAudioResamplercpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/platform/audio/AudioResampler.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/AudioResampler.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/platform/audio/AudioResampler.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -102,7 +102,7 @@
</span><span class="cx"> 
</span><span class="cx">     // Now that we have the source data, resample each channel into the destination bus.
</span><span class="cx">     // FIXME: optimize for the common stereo case where it's faster to process both left/right channels in the same inner loop.
</span><del>-    for (unsigned i = 0; i &lt; m_kernels.size(); ++i) {
</del><ins>+    for (unsigned i = 0; i &lt; numberOfChannels; ++i) {
</ins><span class="cx">         float* destination = destinationBus-&gt;channel(i)-&gt;data();
</span><span class="cx">         m_kernels[i]-&gt;process(destination, framesToProcess);
</span><span class="cx">     }
</span><span class="lines">@@ -118,7 +118,8 @@
</span><span class="cx"> 
</span><span class="cx"> void AudioResampler::reset()
</span><span class="cx"> {
</span><del>-    for (unsigned i = 0; i &lt; m_kernels.size(); ++i)
</del><ins>+    unsigned numberOfChannels = m_kernels.size();
+    for (unsigned i = 0; i &lt; numberOfChannels; ++i)
</ins><span class="cx">         m_kernels[i]-&gt;reset();
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudioAudioResamplerh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/platform/audio/AudioResampler.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/AudioResampler.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/platform/audio/AudioResampler.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -35,10 +35,11 @@
</span><span class="cx"> 
</span><span class="cx"> // AudioResampler resamples the audio stream from an AudioSourceProvider.
</span><span class="cx"> // The audio stream may be single or multi-channel.
</span><ins>+// The default constructor defaults to single-channel (mono).
</ins><span class="cx"> 
</span><span class="cx"> class AudioResampler {
</span><span class="cx"> public:
</span><del>-    AudioResampler(); // default to mono
</del><ins>+    AudioResampler();
</ins><span class="cx">     AudioResampler(unsigned numberOfChannels);
</span><span class="cx">     ~AudioResampler() { }
</span><span class="cx">     
</span><span class="lines">@@ -56,7 +57,7 @@
</span><span class="cx"> 
</span><span class="cx">     static const double MaxRate;
</span><span class="cx"> 
</span><del>-protected:
</del><ins>+private:
</ins><span class="cx">     double m_rate;
</span><span class="cx">     Vector&lt;OwnPtr&lt;AudioResamplerKernel&gt; &gt; m_kernels;
</span><span class="cx">     OwnPtr&lt;AudioBus&gt; m_sourceBus;
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudioAudioResamplerKernelh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/platform/audio/AudioResamplerKernel.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/AudioResamplerKernel.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/platform/audio/AudioResamplerKernel.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -54,7 +54,7 @@
</span><span class="cx"> 
</span><span class="cx">     static const size_t MaxFramesToProcess;
</span><span class="cx"> 
</span><del>-protected:
</del><ins>+private:
</ins><span class="cx">     double rate() const;
</span><span class="cx"> 
</span><span class="cx">     AudioResampler* m_resampler;
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudioAudioResourcesh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/platform/audio/AudioResources.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/AudioResources.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/platform/audio/AudioResources.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -42,4 +42,20 @@
</span><span class="cx"> 
</span><span class="cx"> } // namespace WebCore
</span><span class="cx"> 
</span><ins>+// /webkit/WebKit/chromium/src/ChromiumBridge.cpp:586:PassRefPtr&lt;Image&gt; ChromiumBridge::loadPlatformImageResource(const char* name)
+
+// Chromium/src/webkit/glue/webkitclient_impl.cc
+// WebData WebKitClientImpl::loadResource(const char* name);
+// 
+// Chromium/src/renderer/renderer_glue.cc
+// base::StringPiece GetDataResource(int resource_id);
+// 
+// Chromium/src/app/resource_bundle_posix.cc
+// base::StringPiece ResourceBundle::GetRawDataResource(int resource_id) const;
+// 
+// Chromium/src/base/data_pack.cc
+// bool DataPack::GetStringPiece(uint32 resource_id, StringPiece* data) const;
+
+
+
</ins><span class="cx"> #endif // AudioResources_h
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudioHRTFElevationcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/platform/audio/HRTFElevation.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/HRTFElevation.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/platform/audio/HRTFElevation.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -101,7 +101,7 @@
</span><span class="cx">     int positiveElevation = elevation &lt; 0 ? elevation + 360 : elevation;
</span><span class="cx">     String resourceName = String::format(&quot;IRC_%s_C_R0195_T%03d_P%03d&quot;, subjectName.utf8().data(), azimuth, positiveElevation);
</span><span class="cx"> 
</span><del>-    OwnPtr&lt;AudioBus&gt; impulseResponse(createBusFromAudioFileResource(resourceName, sampleRate));
</del><ins>+    OwnPtr&lt;AudioBus&gt; impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
</ins><span class="cx"> 
</span><span class="cx">     ASSERT(impulseResponse.get());
</span><span class="cx">     if (!impulseResponse.get())
</span></span></pre></div>
<a id="branchesaudioWebCoreplatformaudiomacAudioBusMacmmfromrev70583branchesaudioWebCoreplatformaudioAudioResourcesh"></a>
<div class="copfile"><h4>Copied: branches/audio/WebCore/platform/audio/mac/AudioBusMac.mm (from rev 70583, branches/audio/WebCore/platform/audio/AudioResources.h) (0 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/platform/audio/mac/AudioBusMac.mm                                (rev 0)
+++ branches/audio/WebCore/platform/audio/mac/AudioBusMac.mm        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -0,0 +1,69 @@
</span><ins>+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. (&quot;Apple&quot;) nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS &quot;AS IS&quot; AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#import &quot;config.h&quot;
+
+#if ENABLE(WEB_AUDIO)
+
+#import &quot;AudioBus.h&quot;
+
+#import &quot;AudioFileReader.h&quot;
+#import &lt;wtf/OwnPtr.h&gt;
+#import &lt;wtf/PassOwnPtr.h&gt;
+
+@interface WebCoreBundleClass : NSObject
+@end
+
+@implementation WebCoreBundleClass
+@end
+
+namespace WebCore {
+
+PassOwnPtr&lt;AudioBus&gt; AudioBus::loadPlatformResource(const char* name, double sampleRate)
+{
+    // This method can be called from other than the main thread, so we need an auto-release pool.
+    NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
+    
+    NSBundle *bundle = [NSBundle bundleForClass:[WebCoreBundleClass class]];
+    NSString *audioFilePath = [bundle pathForResource:[NSString stringWithUTF8String:name] ofType:@&quot;aif&quot; inDirectory:@&quot;audio&quot;];
+    NSData *audioData = [NSData dataWithContentsOfFile:audioFilePath];
+
+    if (audioData) {
+        OwnPtr&lt;AudioBus&gt; bus(createBusFromInMemoryAudioFile([audioData bytes], [audioData length], false, sampleRate));
+        [pool release];
+        return bus.release();
+    }
+
+    ASSERT_NOT_REACHED();
+    [pool release];
+    return 0;
+}
+
+} // WebCore
+
+#endif // ENABLE(WEB_AUDIO)
</ins></span></pre></div>
<a id="branchesaudioWebCorewebaudioAudioBufferSourceNodecpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/AudioBufferSourceNode.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/AudioBufferSourceNode.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/AudioBufferSourceNode.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -74,55 +74,56 @@
</span><span class="cx"> {
</span><span class="cx">     AudioBus* outputBus = output(0)-&gt;bus();
</span><span class="cx"> 
</span><ins>+    if (!isInitialized()) {
+        outputBus-&gt;zero();
+        return;
+    }
+
</ins><span class="cx">     // The audio thread can't block on this lock, so we call tryLock() instead.
</span><span class="cx">     // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
</span><span class="cx">     if (m_processLock.tryLock()) {
</span><del>-        if (!isInitialized())
</del><ins>+        // Check if it's time to start playing.
+        double sampleRate = this-&gt;sampleRate();
+        double pitchRate = totalPitchRate();
+        double quantumStartTime = context()-&gt;currentTime();
+        double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
+
+        if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime &gt;= quantumEndTime) {
+            // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
</ins><span class="cx">             outputBus-&gt;zero();
</span><del>-        else {
-            // Check if it's time to start playing
-            double sampleRate = this-&gt;sampleRate();
-            double pitchRate = totalPitchRate();
-            double quantumStartTime = context()-&gt;currentTime();
-            double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
</del><ins>+            m_processLock.unlock();
+            return;
+        }
</ins><span class="cx"> 
</span><del>-            if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime &gt;= quantumEndTime) {
-                // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence...
-                outputBus-&gt;zero();
-                m_processLock.unlock();
-                return;
-            }
</del><ins>+        // Handle sample-accurate scheduling so that buffer playback will happen at a very precise time.
+        m_schedulingFrameDelay = 0;
+        if (m_startTime &gt;= quantumStartTime) {
+            // m_schedulingFrameDelay is set here only the very first render quantum (because of above check: m_startTime &gt;= quantumEndTime)
+            // So: quantumStartTime &lt;= m_startTime &lt; quantumEndTime
+            ASSERT(m_startTime &lt; quantumEndTime);
+            
+            double startTimeInQuantum = m_startTime - quantumStartTime;
+            double startFrameInQuantum = startTimeInQuantum * sampleRate;
+            
+            // m_schedulingFrameDelay is used in provideInput(), so factor in the current playback pitch rate.
+            m_schedulingFrameDelay = static_cast&lt;int&gt;(pitchRate * startFrameInQuantum);
+        }
</ins><span class="cx"> 
</span><del>-            // Handle sample-accurate scheduling so that buffer playback will happen at a very precise time.
-            m_schedulingFrameDelay = 0;
-            if (m_startTime &gt;= quantumStartTime) {
-                // m_schedulingFrameDelay is set here only the very first render quantum (because of above check: m_startTime &gt;= quantumEndTime)
-                // So: quantumStartTime &lt;= m_startTime &lt; quantumEndTime
-                ASSERT(m_startTime &lt; quantumEndTime);
-                
-                double startTimeInQuantum = m_startTime - quantumStartTime;
-                double startFrameInQuantum = startTimeInQuantum * sampleRate;
-                
-                // m_schedulingFrameDelay is used in provideInput(), so factor in the current playback pitch rate.
-                m_schedulingFrameDelay = static_cast&lt;int&gt;(pitchRate * startFrameInQuantum);
-            }
</del><ins>+        // FIXME: optimization opportunity:
+        // With a bit of work, it should be possible to avoid going through the resampler completely when the pitchRate == 1,
+        // especially if the pitchRate has never deviated from 1 in the past.
</ins><span class="cx"> 
</span><del>-            // FIXME: optimization opportunity:
-            // With a bit of work, it should be possible to avoid going through the resampler completely when the pitchRate == 1,
-            // especially if the pitchRate has never deviated from 1 in the past.
-    
-            // Read the samples through the pitch resampler.  Our provideInput() method will be called (possibly more than once) by the resampler.
-            m_resampler.setRate(pitchRate);
-            m_resampler.process(this, outputBus, framesToProcess);
-    
-            // Apply the gain (in-place) to the output bus.
-            double totalGain = gain()-&gt;value() * m_buffer-&gt;gain();
-            outputBus-&gt;copyWithGainFrom(*outputBus, &amp;m_lastGain, totalGain);
-        }
</del><ins>+        // Read the samples through the pitch resampler.  Our provideInput() method will be called by the resampler.
+        m_resampler.setRate(pitchRate);
+        m_resampler.process(this, outputBus, framesToProcess);
</ins><span class="cx"> 
</span><ins>+        // Apply the gain (in-place) to the output bus.
+        double totalGain = gain()-&gt;value() * m_buffer-&gt;gain();
+        outputBus-&gt;copyWithGainFrom(*outputBus, &amp;m_lastGain, totalGain);
+
</ins><span class="cx">         m_processLock.unlock();
</span><span class="cx">     } else {
</span><del>-        // Too bad - the tryLock() failed.  We must be in the middle of changing buffers and were already outputting silence anyway...
</del><ins>+        // Too bad - the tryLock() failed.  We must be in the middle of changing buffers and were already outputting silence anyway.
</ins><span class="cx">         outputBus-&gt;zero();
</span><span class="cx">     }
</span><span class="cx"> }
</span><span class="lines">@@ -138,10 +139,10 @@
</span><span class="cx">     if (!bus || !buffer())
</span><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    unsigned busChannelCount = bus-&gt;numberOfChannels();
</del><span class="cx">     unsigned numberOfChannels = this-&gt;numberOfChannels();
</span><ins>+    unsigned busNumberOfChannels = bus-&gt;numberOfChannels();
</ins><span class="cx"> 
</span><del>-    bool channelCountGood = numberOfChannels == busChannelCount &amp;&amp; (numberOfChannels == 1 || numberOfChannels == 2);
</del><ins>+    bool channelCountGood = numberOfChannels == busNumberOfChannels &amp;&amp; (numberOfChannels == 1 || numberOfChannels == 2);
</ins><span class="cx">     ASSERT(channelCountGood);
</span><span class="cx">     if (!channelCountGood)
</span><span class="cx">         return;
</span><span class="lines">@@ -153,7 +154,7 @@
</span><span class="cx">         return;
</span><span class="cx">     float* destinationR = (numberOfChannels &lt; 2) ? 0 : bus-&gt;channel(1)-&gt;data();
</span><span class="cx"> 
</span><del>-    size_t bufferLength = m_buffer-&gt;length();
</del><ins>+    size_t bufferLength = buffer()-&gt;length();
</ins><span class="cx">     double bufferSampleRate = buffer()-&gt;sampleRate();
</span><span class="cx"> 
</span><span class="cx">     // Calculate the start and end frames in our buffer that we want to play.
</span><span class="lines">@@ -166,11 +167,11 @@
</span><span class="cx">     if (m_isGrain)
</span><span class="cx">         endFrame += 512;
</span><span class="cx"> 
</span><del>-    // Sanity check the endFrame.
</del><ins>+    // Do some sanity checking.
+    if (startFrame &gt;= bufferLength)
+        startFrame = !bufferLength ? 0 : bufferLength - 1;
</ins><span class="cx">     if (endFrame &gt; bufferLength)
</span><span class="cx">         endFrame = bufferLength;
</span><del>-
-    // Sanity check the read index.
</del><span class="cx">     if (m_readIndex &gt;= endFrame)
</span><span class="cx">         m_readIndex = startFrame; // reset to start
</span><span class="cx">     
</span><span class="lines">@@ -199,28 +200,31 @@
</span><span class="cx">     // from the end of the buffer to the start if playing back with looping and also the case where we simply reach the
</span><span class="cx">     // end of the sample data, but haven't yet rendered numberOfFrames worth of output.
</span><span class="cx">     while (framesToProcess &gt; 0) {
</span><ins>+        ASSERT(m_readIndex &lt;= endFrame);
+        if (m_readIndex &gt; endFrame)
+            return;
+            
</ins><span class="cx">         // Figure out how many frames we can process this time.
</span><del>-        ASSERT(m_readIndex &lt;= endFrame);
</del><span class="cx">         int framesAvailable = endFrame - m_readIndex;
</span><span class="cx">         int framesThisTime = min(framesToProcess, framesAvailable);
</span><span class="cx">         
</span><span class="cx">         // Create the destination bus for the part of the destination we're processing this time.
</span><del>-        AudioBus currentDestinationBus(busChannelCount, framesThisTime, false);
</del><ins>+        AudioBus currentDestinationBus(busNumberOfChannels, framesThisTime, false);
</ins><span class="cx">         currentDestinationBus.setChannelMemory(0, destinationL, framesThisTime);
</span><del>-        if (busChannelCount &gt; 1)
</del><ins>+        if (busNumberOfChannels &gt; 1)
</ins><span class="cx">             currentDestinationBus.setChannelMemory(1, destinationR, framesThisTime);
</span><span class="cx"> 
</span><span class="cx">         // Generate output from the buffer.
</span><span class="cx">         readFromBuffer(&amp;currentDestinationBus, framesThisTime);
</span><span class="cx"> 
</span><del>-        // Update destination pointers.
</del><ins>+        // Update the destination pointers.
</ins><span class="cx">         destinationL += framesThisTime;
</span><del>-        if (busChannelCount &gt; 1)
</del><ins>+        if (busNumberOfChannels &gt; 1)
</ins><span class="cx">             destinationR += framesThisTime;
</span><span class="cx"> 
</span><span class="cx">         framesToProcess -= framesThisTime;
</span><span class="cx"> 
</span><del>-        // Handle the case where we reach the end of the sample data we're supposed to play for the buffer.
</del><ins>+        // Handle the case where we reach the end of the part of the sample data we're supposed to play for the buffer.
</ins><span class="cx">         if (m_readIndex &gt;= endFrame) {
</span><span class="cx">             m_readIndex = startFrame;
</span><span class="cx">             m_grainFrameCount = 0;
</span><span class="lines">@@ -257,7 +261,7 @@
</span><span class="cx">         return;
</span><span class="cx">     
</span><span class="cx">     unsigned numberOfChannels = this-&gt;numberOfChannels();
</span><del>-    // FIXME: can add support for sources with more than two channels, but this is not a common case.
</del><ins>+    // FIXME: we can add support for sources with more than two channels, but this is not a common case.
</ins><span class="cx">     bool channelCountGood = numberOfChannels == 1 || numberOfChannels == 2;
</span><span class="cx">     ASSERT(channelCountGood);
</span><span class="cx">     if (!channelCountGood)
</span><span class="lines">@@ -271,7 +275,7 @@
</span><span class="cx">     if (!isSourceGood)
</span><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    // Offset the pointers to the current read position.
</del><ins>+    // Offset the pointers to the current read position in the sample buffer.
</ins><span class="cx">     sourceL += m_readIndex;
</span><span class="cx">     sourceR += m_readIndex;
</span><span class="cx"> 
</span><span class="lines">@@ -288,9 +292,8 @@
</span><span class="cx">     else {
</span><span class="cx">         // Simply copy the data from the source buffer to the destination.
</span><span class="cx">         memcpy(destinationL, sourceL, sizeof(float) * framesToProcess);
</span><del>-        if (numberOfChannels == 2) {
</del><ins>+        if (numberOfChannels == 2)
</ins><span class="cx">             memcpy(destinationR, sourceR, sizeof(float) * framesToProcess);
</span><del>-        }
</del><span class="cx">     }
</span><span class="cx"> 
</span><span class="cx">     // Advance the buffer's read index.
</span><span class="lines">@@ -332,7 +335,9 @@
</span><span class="cx"> 
</span><span class="cx"> void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
</span><span class="cx"> {
</span><del>-    // The context must be locked since changing the buffer can re-configure the number of channels we output.
</del><ins>+    ASSERT(isMainThread());
+    
+    // The context must be locked since changing the buffer can re-configure the number of channels that are output.
</ins><span class="cx">     AudioContext::AutoLocker contextLocker(context());
</span><span class="cx">     
</span><span class="cx">     // This synchronizes with process().
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioAudioBufferSourceNodeh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/AudioBufferSourceNode.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/AudioBufferSourceNode.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/AudioBufferSourceNode.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -40,7 +40,7 @@
</span><span class="cx"> 
</span><span class="cx"> class AudioContext;
</span><span class="cx"> 
</span><del>-// AudioBufferSourceNode is an AudioNode representing an audio source from an in-memory audio asset in an AudioBuffer.
</del><ins>+// AudioBufferSourceNode is an AudioNode representing an audio source from an in-memory audio asset represented by an AudioBuffer.
</ins><span class="cx"> // It generally will be used for short sounds which require a high degree of scheduling flexibility (can playback in rhythmically perfect ways).
</span><span class="cx"> 
</span><span class="cx"> class AudioBufferSourceNode : public AudioSourceNode, public AudioSourceProvider {
</span><span class="lines">@@ -60,6 +60,7 @@
</span><span class="cx">     // When process() is called, the resampler calls provideInput (in the audio thread) to gets its input stream.
</span><span class="cx">     virtual void provideInput(AudioBus*, size_t numberOfFrames);
</span><span class="cx">     
</span><ins>+    // setBuffer() is called on the main thread.  This is the buffer we use for playback.
</ins><span class="cx">     void setBuffer(AudioBuffer*);
</span><span class="cx">     AudioBuffer* buffer() { return m_buffer.get(); }
</span><span class="cx">                     
</span><span class="lines">@@ -95,16 +96,16 @@
</span><span class="cx">     // m_isPlaying is set to true when noteOn() or noteGrainOn() is called.
</span><span class="cx">     bool m_isPlaying;
</span><span class="cx"> 
</span><del>-    // If m_isLooping is false, then this node will output silence after it reaches the end of the sample data in the buffer.
</del><ins>+    // If m_isLooping is false, then this node will be done playing and become inactive after it reaches the end of the sample data in the buffer.
</ins><span class="cx">     // If true, it will wrap around to the start of the buffer each time it reaches the end.
</span><span class="cx">     bool m_isLooping;
</span><span class="cx"> 
</span><del>-    // We are considered finished when we reach the end of the buffer's sample data after noteOn() has been called.
</del><ins>+    // This node is considered finished when it reaches the end of the buffer's sample data after noteOn() has been called.
</ins><span class="cx">     // This will only be set to true if m_isLooping == false.
</span><span class="cx">     bool m_hasFinished;
</span><span class="cx"> 
</span><del>-    // m_startTime is the time to start playing based on the context's timeline (0.0 means now).
-    double m_startTime;
</del><ins>+    // m_startTime is the time to start playing based on the context's timeline (0.0 or a time less than the context's current time means &quot;now&quot;).
+    double m_startTime; // in seconds
</ins><span class="cx"> 
</span><span class="cx">     // m_schedulingFrameDelay is the sample-accurate scheduling offset.
</span><span class="cx">     // It's used so that we start rendering audio samples at a very precise point in time.
</span><span class="lines">@@ -121,7 +122,7 @@
</span><span class="cx">     int m_grainFrameCount; // keeps track of which frame in the grain we're currently rendering
</span><span class="cx"> 
</span><span class="cx">     // totalPitchRate() returns the instantaneous pitch rate (non-time preserving).
</span><del>-    // It incorporates a base pitch rate, any sample-rate conversion factor from the buffer, and any doppler shift from an associated panner node.
</del><ins>+    // It incorporates the base pitch rate, any sample-rate conversion factor from the buffer, and any doppler shift from an associated panner node.
</ins><span class="cx">     double totalPitchRate();
</span><span class="cx"> 
</span><span class="cx">     // m_resampler performs the pitch rate changes to the buffer playback.
</span><span class="lines">@@ -130,6 +131,7 @@
</span><span class="cx">     // m_lastGain provides continuity when we dynamically adjust the gain.
</span><span class="cx">     double m_lastGain;
</span><span class="cx">     
</span><ins>+    // We optionally keep track of a panner node which has a doppler shift that is incorporated into the pitch rate.
</ins><span class="cx">     RefPtr&lt;AudioPannerNode&gt; m_pannerNode;
</span><span class="cx"> 
</span><span class="cx">     // This synchronizes process() with setBuffer() which can cause dynamic channel count changes.
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioAudioNodeidl"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/AudioNode.idl (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/AudioNode.idl        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/AudioNode.idl        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -30,7 +30,10 @@
</span><span class="cx">         readonly attribute unsigned long numberOfInputs;
</span><span class="cx">         readonly attribute unsigned long numberOfOutputs;
</span><span class="cx"> 
</span><del>-        [Custom] void connect(in AudioNode destination, in unsigned long output, in unsigned long input);
-        [Custom] void disconnect(in unsigned long output);
</del><ins>+        [Custom] void connect(in AudioNode destination, in unsigned long output, in unsigned long input)
+            raises(DOMException);
+
+        [Custom] void disconnect(in unsigned long output)
+            raises(DOMException);
</ins><span class="cx">     };
</span><span class="cx"> }
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioConvolverNodecpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/ConvolverNode.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/ConvolverNode.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/ConvolverNode.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -51,7 +51,7 @@
</span><span class="cx">     
</span><span class="cx">     setType(NodeTypeConvolver);
</span><span class="cx">     
</span><del>-    initialize(); // we can run even without an impulse response buffer
</del><ins>+    initialize();
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> ConvolverNode::~ConvolverNode()
</span><span class="lines">@@ -61,28 +61,31 @@
</span><span class="cx"> 
</span><span class="cx"> void ConvolverNode::process(size_t framesToProcess)
</span><span class="cx"> {
</span><del>-    AudioBus* destination = output(0)-&gt;bus();
-    ASSERT(destination);
</del><ins>+    AudioBus* outputBus = output(0)-&gt;bus();
+    ASSERT(outputBus);
</ins><span class="cx"> 
</span><del>-    // Check for reverb buffer change.
-    if (m_newReverb.get())
-        m_reverb = m_newReverb.release();
-
-    if (!isInitialized() || !m_reverb.get()) {
-        destination-&gt;zero();
-        return;
</del><ins>+    // Synchronize with possible dynamic changes to the impulse response.
+    if (m_processLock.tryLock()) {
+        if (!isInitialized() || !m_reverb.get())
+            outputBus-&gt;zero();
+        else {
+            // Process using the convolution engine.
+            // Note that we can handle the case where nothing is connected to the input, in which case we'll just feed silence into the convolver.
+            // FIXME:  If we wanted to get fancy we could try to factor in the 'tail time' and stop processing once the tail dies down if
+            // we keep getting fed silence.
+            m_reverb-&gt;process(input(0)-&gt;bus(), outputBus, framesToProcess);
+        }
+        
+        m_processLock.unlock();
+    } else {
+        // Too bad - the tryLock() failed.  We must be in the middle of setting a new impulse response.
+        outputBus-&gt;zero();
</ins><span class="cx">     }
</span><del>-
-    // Actually process using the convolution engine.
-    // Note that we can handle the case where nothing is connected to the input, in which case we'll just feed silence into the convolver.
-    // FIXME:  If we wanted to get fancy we could try to factor in the 'tail time' and stop processing once the tail dies down if
-    // we keep getting fed silence.
-    AudioBus* source = input(0)-&gt;bus();
-    m_reverb-&gt;process(source, destination, framesToProcess);
</del><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void ConvolverNode::reset()
</span><span class="cx"> {
</span><ins>+    MutexLocker locker(m_processLock);
</ins><span class="cx">     if (m_reverb.get())
</span><span class="cx">         m_reverb-&gt;reset();
</span><span class="cx"> }
</span><span class="lines">@@ -100,32 +103,46 @@
</span><span class="cx">     if (!isInitialized())
</span><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    // FIXME: rare race condition with m_newReverb leaking...
</del><span class="cx">     m_reverb.clear();
</span><del>-    m_newReverb.clear();
</del><span class="cx">     AudioNode::uninitialize();
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void ConvolverNode::setBuffer(AudioBuffer* buffer)
</span><span class="cx"> {
</span><ins>+    ASSERT(isMainThread());
+    
+    ASSERT(buffer);
</ins><span class="cx">     if (!buffer)
</span><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    // Wrap the Float32Arrays by an AudioBus. Luckily it's an efficient pointer set and not a memcpy().
</del><span class="cx">     unsigned numberOfChannels = buffer-&gt;numberOfChannels();
</span><span class="cx">     size_t bufferLength = buffer-&gt;length();
</span><ins>+
+    // The current implementation supports up to four channel impulse responses, which are interpreted as true-stereo (see Reverb class).
+    bool isBufferGood = numberOfChannels &gt; 0 &amp;&amp; numberOfChannels &lt;= 4 &amp;&amp; bufferLength;
+    ASSERT(isBufferGood);
+    if (!isBufferGood)
+        return;
+
+    // Wrap the AudioBuffer by an AudioBus. It's an efficient pointer set and not a memcpy().
</ins><span class="cx">     AudioBus bufferBus(numberOfChannels, bufferLength, false);
</span><del>-
-    for (unsigned int i = 0; i &lt; numberOfChannels; ++i)
</del><ins>+    for (unsigned i = 0; i &lt; numberOfChannels; ++i)
</ins><span class="cx">         bufferBus.setChannelMemory(i, buffer-&gt;getChannelData(i)-&gt;data(), bufferLength);
</span><span class="cx">     
</span><span class="cx">     // Create the reverb with the given impulse response.
</span><del>-    m_newReverb = adoptPtr(new Reverb(&amp;bufferBus, AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, true));
-    m_buffer = buffer;
</del><ins>+    OwnPtr&lt;Reverb&gt; reverb = adoptPtr(new Reverb(&amp;bufferBus, AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, true));
+
+    {
+        // Synchronize with process().
+        MutexLocker locker(m_processLock);
+        m_reverb = reverb.release();
+        m_buffer = buffer;
+    }
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> AudioBuffer* ConvolverNode::buffer()
</span><span class="cx"> {
</span><ins>+    ASSERT(isMainThread());
</ins><span class="cx">     return m_buffer.get();
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioConvolverNodeh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/ConvolverNode.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/ConvolverNode.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/ConvolverNode.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -28,10 +28,10 @@
</span><span class="cx"> #include &quot;AudioNode.h&quot;
</span><span class="cx"> #include &lt;wtf/OwnPtr.h&gt;
</span><span class="cx"> #include &lt;wtf/RefPtr.h&gt;
</span><ins>+#include &lt;wtf/Threading.h&gt;
</ins><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span><del>-class AudioParam;
</del><span class="cx"> class AudioBuffer;
</span><span class="cx"> class Reverb;
</span><span class="cx">     
</span><span class="lines">@@ -51,15 +51,17 @@
</span><span class="cx">     virtual void uninitialize();
</span><span class="cx"> 
</span><span class="cx">     // Impulse responses
</span><del>-    void setBuffer(AudioBuffer* buffer);
</del><ins>+    void setBuffer(AudioBuffer*);
</ins><span class="cx">     AudioBuffer* buffer();
</span><span class="cx"> 
</span><span class="cx"> private:
</span><span class="cx">     ConvolverNode(AudioContext*, double sampleRate);
</span><span class="cx"> 
</span><span class="cx">     OwnPtr&lt;Reverb&gt; m_reverb;
</span><del>-    OwnPtr&lt;Reverb&gt; m_newReverb;
</del><span class="cx">     RefPtr&lt;AudioBuffer&gt; m_buffer;
</span><ins>+
+    // This synchronizes dynamic changes to the convolution impulse response with process().
+    mutable Mutex m_processLock;
</ins><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> } // namespace WebCore
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioConvolverNodeidl"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/ConvolverNode.idl (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/ConvolverNode.idl        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/ConvolverNode.idl        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -23,7 +23,7 @@
</span><span class="cx">  */
</span><span class="cx"> 
</span><span class="cx"> module audio {
</span><del>-    // Linear convolution effect
</del><ins>+    // A linear convolution effect
</ins><span class="cx">     interface [
</span><span class="cx">         Conditional=WEB_AUDIO,
</span><span class="cx">         GenerateToJS
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioJSAudioContextCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JSAudioContextCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JSAudioContextCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JSAudioContextCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -26,24 +26,9 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(WEB_AUDIO)
</span><span class="cx"> 
</span><del>-#include &quot;JSAudioContext.h&quot;
-
</del><span class="cx"> #include &quot;AudioContext.h&quot;
</span><del>-#include &quot;AudioNode.h&quot;
-#include &quot;JSAudioGainNode.h&quot;
-#include &quot;JSAudioNode.h&quot;
-#include &quot;JSAudioPannerNode.h&quot;
-#include &quot;JSLowPass2FilterNode.h&quot;
-#include &quot;JSHighPass2FilterNode.h&quot;
-#include &quot;JSConvolverNode.h&quot;
-#include &quot;JSRealtimeAnalyserNode.h&quot;
</del><span class="cx"> 
</span><del>-#include &quot;LowPass2FilterNode.h&quot;
-#include &quot;HighPass2FilterNode.h&quot;
-#include &quot;AudioGainNode.h&quot;
-#include &quot;AudioPannerNode.h&quot;
-#include &quot;ConvolverNode.h&quot;
-#include &quot;RealtimeAnalyserNode.h&quot;
</del><ins>+#include &quot;JSAudioContext.h&quot;
</ins><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioJSAudioMiscCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JSAudioMiscCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JSAudioMiscCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JSAudioMiscCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -30,17 +30,16 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(WEB_AUDIO)
</span><span class="cx"> 
</span><del>-#include &quot;JSAudioNode.h&quot;
-
</del><ins>+#include &quot;AudioBuffer.h&quot;
</ins><span class="cx"> #include &quot;AudioBufferSourceNode.h&quot;
</span><del>-#include &quot;AudioBuffer.h&quot;
</del><span class="cx"> #include &quot;AudioListener.h&quot;
</span><span class="cx"> #include &quot;ConvolverNode.h&quot;
</span><del>-
</del><ins>+#include &quot;JavaScriptAudioNode.h&quot;
+#include &quot;JSAudioBuffer.h&quot;
</ins><span class="cx"> #include &quot;JSAudioBufferSourceNode.h&quot;
</span><del>-#include &quot;JSAudioBuffer.h&quot;
-#include &quot;JSAudioListener.h&quot;
</del><ins>+#include &quot;JSAudioNode.h&quot;
</ins><span class="cx"> #include &quot;JSConvolverNode.h&quot;
</span><ins>+#include &quot;JSJavaScriptAudioNode.h&quot;
</ins><span class="cx"> 
</span><span class="cx"> using namespace JSC;
</span><span class="cx"> 
</span><span class="lines">@@ -53,7 +52,7 @@
</span><span class="cx">     return result;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JSAudioBufferSourceNode::setBuffer(ExecState* /*exec*/, JSValue value)
</del><ins>+void JSAudioBufferSourceNode::setBuffer(ExecState*, JSValue value)
</ins><span class="cx"> {
</span><span class="cx">     AudioBufferSourceNode* imp = static_cast&lt;AudioBufferSourceNode*&gt;(impl());
</span><span class="cx">     imp-&gt;setBuffer(toAudioBuffer(value));
</span><span class="lines">@@ -66,12 +65,18 @@
</span><span class="cx">     return result;
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void JSConvolverNode::setBuffer(ExecState* /*exec*/, JSValue value)
</del><ins>+void JSConvolverNode::setBuffer(ExecState*, JSValue value)
</ins><span class="cx"> {
</span><span class="cx">     ConvolverNode* imp = static_cast&lt;ConvolverNode*&gt;(impl());
</span><span class="cx">     imp-&gt;setBuffer(toAudioBuffer(value));
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+void JSJavaScriptAudioNode::markChildren(MarkStack&amp; markStack)
+{
+    Base::markChildren(markStack);
+    static_cast&lt;JavaScriptAudioNode*&gt;(impl())-&gt;markJSEventListeners(markStack);
+}
+
</ins><span class="cx"> } // namespace WebCore
</span><span class="cx"> 
</span><span class="cx"> #endif // ENABLE(WEB_AUDIO)
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioJSAudioNodeCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JSAudioNodeCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JSAudioNodeCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JSAudioNodeCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -29,42 +29,50 @@
</span><span class="cx"> #include &quot;JSAudioNode.h&quot;
</span><span class="cx"> 
</span><span class="cx"> #include &quot;AudioNode.h&quot;
</span><ins>+#include &lt;runtime/Error.h&gt;
</ins><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span><span class="cx"> JSC::JSValue JSAudioNode::connect(JSC::ExecState* exec)
</span><span class="cx"> {
</span><del>-    AudioNode* audioNode = static_cast&lt;AudioNode*&gt;(impl());
-    unsigned output = 0;
-    unsigned input = 0;
-    
</del><span class="cx">     if (exec-&gt;argumentCount() &lt; 1)
</span><del>-        return JSC::jsUndefined(); // FIXME: should throw exception
</del><ins>+        return throwError(exec, createSyntaxError(exec, &quot;Not enough arguments&quot;));
+
+    if (exec-&gt;argumentCount() &gt; 3)
+        return throwError(exec, createSyntaxError(exec, &quot;Too many arguments&quot;));
+
+    unsigned outputIndex = 0;
+    unsigned inputIndex = 0;
</ins><span class="cx">     
</span><span class="cx">     AudioNode* destinationNode = toAudioNode(exec-&gt;argument(0));
</span><ins>+    if (!destinationNode)
+        return throwError(exec, createSyntaxError(exec, &quot;Invalid destination node&quot;));
</ins><span class="cx">     
</span><span class="cx">     if (exec-&gt;argumentCount() &gt; 1)
</span><del>-        output = exec-&gt;argument(1).toInt32(exec);
</del><ins>+        outputIndex = exec-&gt;argument(1).toInt32(exec);
</ins><span class="cx"> 
</span><span class="cx">     if (exec-&gt;argumentCount() &gt; 2)
</span><del>-        input = exec-&gt;argument(2).toInt32(exec);
</del><ins>+        inputIndex = exec-&gt;argument(2).toInt32(exec);
</ins><span class="cx"> 
</span><del>-    audioNode-&gt;connect(destinationNode, output, input);
</del><ins>+    AudioNode* audioNode = static_cast&lt;AudioNode*&gt;(impl());
+    bool success = audioNode-&gt;connect(destinationNode, outputIndex, inputIndex);
+    if (!success)
+        return throwError(exec, createSyntaxError(exec, &quot;Invalid index parameter&quot;));
+    
</ins><span class="cx">     return JSC::jsUndefined();
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> JSC::JSValue JSAudioNode::disconnect(JSC::ExecState* exec)
</span><del>-{
-    AudioNode* audioNode = static_cast&lt;AudioNode*&gt;(impl());
-    unsigned output = 0;
-    
</del><ins>+{    
</ins><span class="cx">     if (exec-&gt;argumentCount() &gt; 1)
</span><del>-        return JSC::jsUndefined(); // FIXME: should throw exception
</del><ins>+        return throwError(exec, createSyntaxError(exec, &quot;Too many arguments&quot;));
</ins><span class="cx">     
</span><ins>+    unsigned outputIndex = 0;
</ins><span class="cx">     if (exec-&gt;argumentCount() &gt; 0)
</span><del>-        output = exec-&gt;argument(0).toInt32(exec);
</del><ins>+        outputIndex = exec-&gt;argument(0).toInt32(exec);
</ins><span class="cx"> 
</span><del>-    audioNode-&gt;disconnect(output);
</del><ins>+    AudioNode* audioNode = static_cast&lt;AudioNode*&gt;(impl());
+    audioNode-&gt;disconnect(outputIndex);
</ins><span class="cx">     return JSC::jsUndefined();
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre></div>
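<p>The connect() binding above now checks the argument count, rejects an invalid destination node, and turns a boolean failure from AudioNode::connect() into a thrown SyntaxError (disconnect() gets the same argument-count treatment). Below is a minimal standalone sketch of the index-validated connect contract the bindings rely on; the SketchNode type and its bookkeeping are hypothetical, not WebCore code:</p>
<pre>
// Minimal sketch: connect() returns false rather than crashing when the
// destination is null or an output/input index is out of range, and the
// binding layer reports that failure as an exception.
#include &lt;cstdio&gt;
#include &lt;vector&gt;

struct SketchNode {
    SketchNode(unsigned numberOfInputs, unsigned numberOfOutputs)
        : inputs(numberOfInputs), outputs(numberOfOutputs) { }

    // Returns true on success, false if the destination or either index is invalid.
    bool connect(SketchNode* destination, unsigned outputIndex, unsigned inputIndex)
    {
        if (!destination)
            return false;
        if (outputIndex &gt;= outputs.size() || inputIndex &gt;= destination-&gt;inputs.size())
            return false;
        outputs[outputIndex] += 1; // record one more connection from this output
        return true;
    }

    std::vector&lt;int&gt; inputs;   // placeholders for real input objects
    std::vector&lt;int&gt; outputs;  // per-output connection counts
};

int main()
{
    SketchNode source(0, 1), destination(1, 0);
    std::printf(&quot;valid connect: %d\n&quot;, source.connect(&amp;destination, 0, 0) ? 1 : 0); // prints 1
    std::printf(&quot;bad index:     %d\n&quot;, source.connect(&amp;destination, 2, 0) ? 1 : 0); // prints 0; the binding throws
    return 0;
}
</pre>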
<a id="branchesaudioWebCorewebaudioJSCachedAudioCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JSCachedAudioCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JSCachedAudioCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JSCachedAudioCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -27,22 +27,7 @@
</span><span class="cx"> #if ENABLE(WEB_AUDIO)
</span><span class="cx"> 
</span><span class="cx"> #include &quot;JSCachedAudio.h&quot;
</span><del>-
-#include &quot;DOMWindow.h&quot;
-#include &quot;Document.h&quot;
-#include &quot;Event.h&quot;
-#include &quot;File.h&quot;
-#include &quot;Frame.h&quot;
-#include &quot;FrameLoader.h&quot;
-#include &quot;HTMLDocument.h&quot;
-#include &quot;JSDOMWindowCustom.h&quot;
-#include &quot;JSDocument.h&quot;
-#include &quot;JSEvent.h&quot;
-#include &quot;JSEventListener.h&quot;
-#include &quot;JSFile.h&quot;
</del><span class="cx"> #include &quot;CachedAudio.h&quot;
</span><del>-#include &lt;runtime/Error.h&gt;
-#include &lt;interpreter/Interpreter.h&gt;
</del><span class="cx"> 
</span><span class="cx"> using namespace JSC;
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioJavaScriptAudioNodecpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JavaScriptAudioNode.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JavaScriptAudioNode.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JavaScriptAudioNode.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -34,36 +34,45 @@
</span><span class="cx"> #include &quot;AudioNodeInput.h&quot;
</span><span class="cx"> #include &quot;AudioNodeOutput.h&quot;
</span><span class="cx"> #include &quot;AudioProcessingEvent.h&quot;
</span><ins>+#include &quot;Document.h&quot;
</ins><span class="cx"> #include &quot;Float32Array.h&quot;
</span><del>-
</del><span class="cx"> #include &lt;wtf/MainThread.h&gt;
</span><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span><del>-const size_t MaxBufferSize = 16384;
-    
-JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, double sampleRate, size_t bufferSize, unsigned /*numberOfInputs*/, unsigned /*numberOfOutputs*/)
</del><ins>+const size_t DefaultBufferSize = 4096;
+
+JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
</ins><span class="cx">     : AudioNode(context, sampleRate)
</span><del>-    , m_currentBufferIndex(0)
-    , m_eventInputBuffer(0)
-    , m_eventOutputBuffer(0)
</del><ins>+    , m_doubleBufferIndex(0)
+    , m_doubleBufferIndexForEvent(0)
</ins><span class="cx">     , m_bufferSize(bufferSize)
</span><del>-    , m_readIndex(0)
-    , m_writeIndex(0)
</del><ins>+    , m_bufferIndex(0)
+    , m_isRequestOutstanding(false)
</ins><span class="cx"> {
</span><del>-    // FIXME: currently ignoring numberOfInputs and numberOfOutputs
-   
-    // In default case where no buffer size is given, set to reasonable size.
-    if (!m_bufferSize)
-        m_bufferSize = 4096;
</del><ins>+    // Check for valid buffer size.
+    switch (bufferSize) {
+    case 256:
+    case 512:
+    case 1024:
+    case 2048:
+    case 4096:
+    case 8192:
+    case 16384:
+        m_bufferSize = bufferSize;
+        break;
+    default:
+        m_bufferSize = DefaultBufferSize;
+    }
</ins><span class="cx">         
</span><ins>+    // Regardless of the allowed buffer sizes above, we still need to process at the granularity of the AudioNode.
</ins><span class="cx">     if (m_bufferSize &lt; AudioNode::ProcessingSizeInFrames)
</span><span class="cx">         m_bufferSize = AudioNode::ProcessingSizeInFrames;
</span><del>-        
-    if (m_bufferSize &gt; MaxBufferSize)
-        m_bufferSize = MaxBufferSize;
-    
-    // FIXME: right now we're hardcoded to a single stereo output
</del><ins>+
+    // FIXME: Right now we're hardcoded to single input and single output.
+    // Although the specification says this is OK for a simple implementation, multiple inputs and outputs would be good.
+    ASSERT_UNUSED(numberOfInputs, numberOfInputs == 1);
+    ASSERT_UNUSED(numberOfOutputs, numberOfOutputs == 1);
</ins><span class="cx">     addInput(adoptPtr(new AudioNodeInput(this)));
</span><span class="cx">     addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
</span><span class="cx"> 
</span><span class="lines">@@ -84,9 +93,7 @@
</span><span class="cx"> 
</span><span class="cx">     double sampleRate = context()-&gt;sampleRate();
</span><span class="cx"> 
</span><del>-    // Use double-buffering on both input and output sides
-    
-    // FIXME: hardcoded to single stereo input and single stereo output
</del><ins>+    // Use double-buffering on both input and output sides.
</ins><span class="cx">     RefPtr&lt;AudioBuffer&gt; inputBuffer1 = AudioBuffer::create(2, bufferSize(), sampleRate);
</span><span class="cx">     RefPtr&lt;AudioBuffer&gt; inputBuffer2 = AudioBuffer::create(2, bufferSize(), sampleRate);
</span><span class="cx">     RefPtr&lt;AudioBuffer&gt; outputBuffer1 = AudioBuffer::create(2, bufferSize(), sampleRate);
</span><span class="lines">@@ -118,50 +125,79 @@
</span><span class="cx"> 
</span><span class="cx"> void JavaScriptAudioNode::process(size_t framesToProcess)
</span><span class="cx"> {
</span><del>-    int bufferIndex = currentBufferIndex();
-    AudioBuffer* inputBuffer = m_inputBuffers[bufferIndex].get();
-    AudioBuffer* outputBuffer = m_outputBuffers[bufferIndex].get();
-
</del><ins>+    // Get input and output busses.
</ins><span class="cx">     AudioBus* inputBus = this-&gt;input(0)-&gt;bus();
</span><span class="cx">     AudioBus* outputBus = this-&gt;output(0)-&gt;bus();
</span><span class="cx"> 
</span><del>-    // FIXME: make this more flexible
</del><ins>+    // Get input and output buffers.  We double-buffer both the input and output sides.
+    unsigned doubleBufferIndex = this-&gt;doubleBufferIndex();
+    bool isDoubleBufferIndexGood = doubleBufferIndex &lt; 2 &amp;&amp; doubleBufferIndex &lt; m_inputBuffers.size() &amp;&amp; doubleBufferIndex &lt; m_outputBuffers.size();
+    ASSERT(isDoubleBufferIndexGood);
+    if (!isDoubleBufferIndexGood)
+        return;
+    
+    AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
+    AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
+
+    // Check the consistency of input and output buffers.
+    bool buffersAreGood = inputBuffer &amp;&amp; outputBuffer &amp;&amp; bufferSize() == inputBuffer-&gt;length() &amp;&amp; bufferSize() == outputBuffer-&gt;length();
+    ASSERT(buffersAreGood);
+    if (!buffersAreGood)
+        return;
+
+    // We assume that bufferSize() is evenly divisible by framesToProcess - should always be true, but we should still check.
+    bool isFramesToProcessGood = framesToProcess &amp;&amp; bufferSize() &gt;= framesToProcess &amp;&amp; !(bufferSize() % framesToProcess);
+    ASSERT(isFramesToProcessGood);
+    if (!isFramesToProcessGood)
+        return;
+        
</ins><span class="cx">     unsigned numberOfInputChannels = inputBus-&gt;numberOfChannels();
</span><span class="cx">     
</span><del>-    bool channelsAreGood = (outputBus-&gt;numberOfChannels() == 2);
</del><ins>+    bool channelsAreGood = (numberOfInputChannels == 1 || numberOfInputChannels == 2) &amp;&amp; outputBus-&gt;numberOfChannels() == 2;
</ins><span class="cx">     ASSERT(channelsAreGood);
</span><span class="cx">     if (!channelsAreGood)
</span><span class="cx">         return;
</span><span class="cx"> 
</span><del>-    // FIXME: this code assumes that bufferSize() is evenly divisible by framesToProcess - should always be true, but we should still check.
-    float* inputL = inputBus-&gt;channel(0)-&gt;data();
-    float* inputR = numberOfInputChannels &gt; 1 ? inputBus-&gt;channel(1)-&gt;data() : 0;
-    float* outputL = outputBus-&gt;channel(0)-&gt;data();
-    float* outputR = outputBus-&gt;channel(1)-&gt;data();
</del><ins>+    float* sourceL = inputBus-&gt;channel(0)-&gt;data();
+    float* sourceR = numberOfInputChannels &gt; 1 ? inputBus-&gt;channel(1)-&gt;data() : 0;
+    float* destinationL = outputBus-&gt;channel(0)-&gt;data();
+    float* destinationR = outputBus-&gt;channel(1)-&gt;data();
</ins><span class="cx"> 
</span><del>-    // Copy from input to input buffer
-    memcpy(inputBuffer-&gt;getChannelData(0)-&gt;data() + m_writeIndex, inputL, sizeof(float) * framesToProcess);
</del><ins>+    // Copy from the input to the input buffer.
+    size_t bytesToCopy = sizeof(float) * framesToProcess;
+    memcpy(inputBuffer-&gt;getChannelData(0)-&gt;data() + m_bufferIndex, sourceL, bytesToCopy);
</ins><span class="cx">     
</span><del>-    if (numberOfInputChannels &gt; 1)
-        memcpy(inputBuffer-&gt;getChannelData(1)-&gt;data() + m_writeIndex, inputR, sizeof(float) * framesToProcess);
-    else {
-        // FIXME: this is a hack.
-        memcpy(inputBuffer-&gt;getChannelData(1)-&gt;data() + m_writeIndex, inputL, sizeof(float) * framesToProcess);
</del><ins>+    if (numberOfInputChannels == 2)
+        memcpy(inputBuffer-&gt;getChannelData(1)-&gt;data() + m_bufferIndex, sourceR, bytesToCopy);
+    else if (numberOfInputChannels == 1) {
+        // If the input is mono, then also copy the data to the right channel of the AudioBuffer which the AudioProcessingEvent uses.
+        // FIXME: present an AudioBuffer with the same number of channels as our input.
+        memcpy(inputBuffer-&gt;getChannelData(1)-&gt;data() + m_bufferIndex, sourceL, bytesToCopy);
</ins><span class="cx">     }
</span><span class="cx">     
</span><del>-    // Copy from output buffer to output
-    memcpy(outputL, outputBuffer-&gt;getChannelData(0)-&gt;data() + m_readIndex, sizeof(float) * framesToProcess);
-    memcpy(outputR, outputBuffer-&gt;getChannelData(1)-&gt;data() + m_readIndex, sizeof(float) * framesToProcess);
</del><ins>+    // Copy from the output buffer to the output.
+    memcpy(destinationL, outputBuffer-&gt;getChannelData(0)-&gt;data() + m_bufferIndex, bytesToCopy);
+    memcpy(destinationR, outputBuffer-&gt;getChannelData(1)-&gt;data() + m_bufferIndex, bytesToCopy);
</ins><span class="cx"> 
</span><del>-    m_writeIndex = (m_writeIndex + framesToProcess) % bufferSize();
-    m_readIndex = (m_readIndex + framesToProcess) % bufferSize(); // FIXME: consolidate read and write indices
</del><ins>+    // Update the buffering index.
+    m_bufferIndex = (m_bufferIndex + framesToProcess) % bufferSize();
</ins><span class="cx"> 
</span><del>-    // Check if it's time to fire an event and swap buffers...
-    if (!m_writeIndex) {
-        // Fire the event on the main thread, not this one (which is the realtime audio thread).
-        m_eventInputBuffer = inputBuffer;
-        m_eventOutputBuffer = outputBuffer;
-        callOnMainThread(fireProcessEventDispatch, this);
</del><ins>+    // m_bufferIndex will wrap back around to 0 when the current input and output buffers are full.
+    // When this happens, fire an event and swap buffers.
+    if (!m_bufferIndex) {
+        // Avoid building up requests on the main thread to fire process events when they're not being handled.
+        // This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
+        if (m_isRequestOutstanding) {
+            // We're late in handling the previous request.  The main thread must be very busy.
+            // The best we can do is clear out the buffer ourself here.
+            outputBuffer-&gt;zero();            
+        } else {
+            // Fire the event on the main thread, not this one (which is the realtime audio thread).
+            m_doubleBufferIndexForEvent = m_doubleBufferIndex;
+            callOnMainThread(fireProcessEventDispatch, this);
+            m_isRequestOutstanding = true;
+        }
+
</ins><span class="cx">         swapBuffers();
</span><span class="cx">     }
</span><span class="cx"> }
</span><span class="lines">@@ -178,36 +214,39 @@
</span><span class="cx"> 
</span><span class="cx"> void JavaScriptAudioNode::fireProcessEvent()
</span><span class="cx"> {
</span><del>-    ASSERT(m_eventInputBuffer &amp;&amp; m_eventOutputBuffer);
-    if (!m_eventInputBuffer || !m_eventOutputBuffer)
</del><ins>+    ASSERT(isMainThread() &amp;&amp; m_isRequestOutstanding);
+    
+    bool isIndexGood = m_doubleBufferIndexForEvent == 0 || m_doubleBufferIndexForEvent == 1;
+    ASSERT(isIndexGood);
+    if (!isIndexGood)
</ins><span class="cx">         return;
</span><ins>+        
+    AudioBuffer* inputBuffer = m_inputBuffers[m_doubleBufferIndexForEvent].get();
+    AudioBuffer* outputBuffer = m_outputBuffers[m_doubleBufferIndexForEvent].get();
+    ASSERT(inputBuffer &amp;&amp; outputBuffer);
+    if (!inputBuffer || !outputBuffer)
+        return;
</ins><span class="cx"> 
</span><del>-    // FIXME: don't let events build up! - only create new one if old ones are being handled...
-
-    // Avoid firing event if document has already gone away.
</del><ins>+    // Avoid firing the event if the document has already gone away.
</ins><span class="cx">     if (context()-&gt;hasDocument()) {
</span><del>-        // Call JavaScript event handler which will process audio
-        dispatchEvent(AudioProcessingEvent::create(m_eventInputBuffer, m_eventOutputBuffer));
</del><ins>+        m_isRequestOutstanding = false;
+        
+        // Call the JavaScript event handler which will do the audio processing.
+        dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
</ins><span class="cx">     }
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void JavaScriptAudioNode::reset()
</span><span class="cx"> {
</span><del>-    m_readIndex = 0;
-    m_writeIndex = 0;
-    m_currentBufferIndex = 0;
</del><ins>+    m_bufferIndex = 0;
+    m_doubleBufferIndex = 0;
</ins><span class="cx"> 
</span><del>-    for (unsigned int i = 0; i &lt; 2; ++i) {
</del><ins>+    for (unsigned i = 0; i &lt; 2; ++i) {
</ins><span class="cx">         m_inputBuffers[i]-&gt;zero();
</span><span class="cx">         m_outputBuffers[i]-&gt;zero();
</span><span class="cx">     }
</span><del>-
-    m_eventInputBuffer = 0;
-    m_eventOutputBuffer = 0;
</del><span class="cx"> }
</span><span class="cx"> 
</span><del>-// FIXME: important! Check what happens when the document goes away, but the context still exists and
-// one or more events have been dispatched but not yet handled...
</del><span class="cx"> ScriptExecutionContext* JavaScriptAudioNode::scriptExecutionContext() const
</span><span class="cx"> {
</span><span class="cx">     return const_cast&lt;JavaScriptAudioNode*&gt;(this)-&gt;context()-&gt;document();
</span></span></pre></div>
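<p>The rewritten process() above accumulates each render quantum into one half of a double buffer and, when the buffer index wraps back to zero, either dispatches the full buffer to the main thread or, if the previous request is still outstanding, zeroes the output and drops the data. The following is a standalone, single-channel sketch of that accumulate-and-dispatch pattern (illustrative only; the &quot;dispatch&quot; is synchronous and there is no real threading):</p>
<pre>
// Small render quanta are copied into one of two larger buffers; when a buffer
// fills, it is handed off while the other buffer keeps collecting audio.
#include &lt;cstdio&gt;
#include &lt;cstring&gt;
#include &lt;vector&gt;

class DoubleBufferSketch {
public:
    explicit DoubleBufferSketch(size_t bufferSize)
        : m_bufferSize(bufferSize), m_bufferIndex(0), m_doubleBufferIndex(0), m_isRequestOutstanding(false)
    {
        m_buffers[0].resize(bufferSize);
        m_buffers[1].resize(bufferSize);
    }

    // Called once per render quantum of framesToProcess frames.
    void process(const float* source, size_t framesToProcess)
    {
        // Assume bufferSize is a whole multiple of the render quantum, as the patch does.
        if (!framesToProcess || m_bufferSize % framesToProcess)
            return;

        std::memcpy(m_buffers[m_doubleBufferIndex].data() + m_bufferIndex,
                    source, sizeof(float) * framesToProcess);
        m_bufferIndex = (m_bufferIndex + framesToProcess) % m_bufferSize;

        if (!m_bufferIndex) { // the current buffer just filled and wrapped to 0
            if (m_isRequestOutstanding) {
                // The consumer is behind: drop the data rather than queueing more work.
                std::printf(&quot;consumer busy, dropping buffer %u\n&quot;, m_doubleBufferIndex);
            } else {
                m_isRequestOutstanding = true;
                std::printf(&quot;dispatching buffer %u to the consumer\n&quot;, m_doubleBufferIndex);
                m_isRequestOutstanding = false; // the real consumer clears this when it has finished
            }
            m_doubleBufferIndex = 1 - m_doubleBufferIndex; // swap to the other buffer
        }
    }

private:
    size_t m_bufferSize;
    size_t m_bufferIndex;
    unsigned m_doubleBufferIndex;
    bool m_isRequestOutstanding;
    std::vector&lt;float&gt; m_buffers[2];
};

int main()
{
    DoubleBufferSketch node(512);
    std::vector&lt;float&gt; quantum(128, 0.0f);
    for (int i = 0; i &lt; 8; ++i) // 8 quanta of 128 frames = two full 512-frame buffers
        node.process(quantum.data(), quantum.size());
    return 0;
}
</pre>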
<a id="branchesaudioWebCorewebaudioJavaScriptAudioNodeh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JavaScriptAudioNode.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JavaScriptAudioNode.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JavaScriptAudioNode.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -27,7 +27,6 @@
</span><span class="cx"> 
</span><span class="cx"> #include &quot;ActiveDOMObject.h&quot;
</span><span class="cx"> #include &quot;AudioNode.h&quot;
</span><del>-#include &quot;Document.h&quot;
</del><span class="cx"> #include &quot;EventListener.h&quot;
</span><span class="cx"> #include &quot;EventTarget.h&quot;
</span><span class="cx"> #include &lt;wtf/PassRefPtr.h&gt;
</span><span class="lines">@@ -41,8 +40,18 @@
</span><span class="cx"> class AudioProcessingEvent;
</span><span class="cx"> class Float32Array;
</span><span class="cx"> 
</span><ins>+// JavaScriptAudioNode is an AudioNode which allows for arbitrary synthesis or processing directly using JavaScript.
+// The API allows for a variable number of inputs and outputs, although it must have at least one input or output.
+// This basic implementation supports no more than one input and output.
+// The &quot;onaudioprocess&quot; attribute is an event listener which is called periodically with an AudioProcessingEvent carrying
+// AudioBuffers for each input and output.
+
</ins><span class="cx"> class JavaScriptAudioNode : public AudioNode, public EventTarget {
</span><span class="cx"> public:
</span><ins>+    // bufferSize must be one of the following values: 256, 512, 1024, 2048, 4096, 8192, 16384.
+    // This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
+    // Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
+    // The value chosen must carefully balance latency against audio quality.
</ins><span class="cx">     static PassRefPtr&lt;JavaScriptAudioNode&gt; create(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1)
</span><span class="cx">     {
</span><span class="cx">         return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputs, numberOfOutputs));
</span><span class="lines">@@ -56,43 +65,41 @@
</span><span class="cx">     virtual void initialize();
</span><span class="cx">     virtual void uninitialize();
</span><span class="cx"> 
</span><del>-    // Double buffering
-    size_t bufferSize() const { return m_bufferSize; }
-    int currentBufferIndex() const { return m_currentBufferIndex; }
-    void swapBuffers() { m_currentBufferIndex = 1 - m_currentBufferIndex; }
-
-    static void fireProcessEventDispatch(void* userData);
-    void fireProcessEvent();
-
-    // Reconcile ref/deref which are defined both in AudioNode and EventTarget.
-    using AudioNode::ref;
-    using AudioNode::deref;
-
</del><span class="cx">     // EventTarget
</span><span class="cx">     virtual ScriptExecutionContext* scriptExecutionContext() const;
</span><span class="cx">     virtual JavaScriptAudioNode* toJavaScriptAudioNode();
</span><span class="cx">     virtual EventTargetData* eventTargetData() { return &amp;m_eventTargetData; }
</span><span class="cx">     virtual EventTargetData* ensureEventTargetData()  { return &amp;m_eventTargetData; }
</span><span class="cx"> 
</span><ins>+    size_t bufferSize() const { return m_bufferSize; }
+
</ins><span class="cx">     DEFINE_ATTRIBUTE_EVENT_LISTENER(audioprocess);
</span><ins>+
+    // Reconcile ref/deref which are defined both in AudioNode and EventTarget.
+    using AudioNode::ref;
+    using AudioNode::deref;
</ins><span class="cx">     
</span><span class="cx"> private:
</span><span class="cx">     JavaScriptAudioNode(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
</span><span class="cx"> 
</span><del>-    virtual void refEventTarget() { ref(); }
-    virtual void derefEventTarget() { deref(); }
-    EventTargetData m_eventTargetData;
</del><ins>+    static void fireProcessEventDispatch(void* userData);
+    void fireProcessEvent();
</ins><span class="cx"> 
</span><span class="cx">     // Double buffering
</span><del>-    int m_currentBufferIndex;
</del><ins>+    unsigned doubleBufferIndex() const { return m_doubleBufferIndex; }
+    void swapBuffers() { m_doubleBufferIndex = 1 - m_doubleBufferIndex; }
+    int m_doubleBufferIndex;
+    int m_doubleBufferIndexForEvent;
</ins><span class="cx">     Vector&lt;RefPtr&lt;AudioBuffer&gt; &gt; m_inputBuffers;
</span><span class="cx">     Vector&lt;RefPtr&lt;AudioBuffer&gt; &gt; m_outputBuffers;
</span><del>-    AudioBuffer* m_eventInputBuffer;
-    AudioBuffer* m_eventOutputBuffer;
</del><span class="cx"> 
</span><ins>+    virtual void refEventTarget() { ref(); }
+    virtual void derefEventTarget() { deref(); }
+    EventTargetData m_eventTargetData;
+
</ins><span class="cx">     size_t m_bufferSize;
</span><del>-    int m_readIndex;
-    int m_writeIndex;
</del><ins>+    int m_bufferIndex;
+    volatile bool m_isRequestOutstanding;
</ins><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> } // namespace WebCore
</span></span></pre></div>
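<p>The new header comment ties bufferSize to latency: each full buffer of N sample-frames adds roughly N / sampleRate seconds before the onaudioprocess handler sees the audio, and the handler fires correspondingly less often. A small illustrative calculation of that tradeoff for the allowed sizes (44.1 kHz is just an example rate; the real value comes from the AudioContext):</p>
<pre>
// Illustrative only: how often the audioprocess event would fire, and how much
// buffering delay each allowed bufferSize implies, at an example sample rate.
#include &lt;cstdio&gt;

int main()
{
    const double sampleRate = 44100.0;
    const unsigned allowedSizes[] = { 256, 512, 1024, 2048, 4096, 8192, 16384 };

    for (unsigned size : allowedSizes) {
        double secondsPerBuffer = size / sampleRate;
        std::printf(&quot;bufferSize %5u -&gt; event every %6.1f ms (about %5.1f events/sec)\n&quot;,
                    size, secondsPerBuffer * 1000.0, 1.0 / secondsPerBuffer);
    }
    return 0;
}
</pre>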
<a id="branchesaudioWebCorewebaudioJavaScriptAudioNodeidl"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/JavaScriptAudioNode.idl (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/JavaScriptAudioNode.idl        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/JavaScriptAudioNode.idl        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -27,6 +27,7 @@
</span><span class="cx">     interface [
</span><span class="cx">         Conditional=WEB_AUDIO,
</span><span class="cx">         GenerateToJS,
</span><ins>+        CustomMarkFunction,
</ins><span class="cx"> #if defined(V8_BINDING) &amp;&amp; V8_BINDING
</span><span class="cx">         EventTarget
</span><span class="cx"> #endif
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioRealtimeAnalysercpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/RealtimeAnalyser.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/RealtimeAnalyser.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/RealtimeAnalyser.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -29,7 +29,6 @@
</span><span class="cx"> #include &quot;RealtimeAnalyser.h&quot;
</span><span class="cx"> 
</span><span class="cx"> #include &quot;AudioBus.h&quot;
</span><del>-#include &quot;Biquad.h&quot;
</del><span class="cx"> #include &quot;FFTFrame.h&quot;
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(3D_CANVAS)
</span><span class="lines">@@ -37,21 +36,29 @@
</span><span class="cx"> #include &quot;Uint8Array.h&quot;
</span><span class="cx"> #endif
</span><span class="cx"> 
</span><ins>+#include &lt;algorithm&gt;
</ins><span class="cx"> #include &lt;wtf/Complex.h&gt;
</span><del>-#include &lt;algorithm&gt;
</del><span class="cx"> 
</span><span class="cx"> using namespace std;
</span><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span><ins>+const double RealtimeAnalyser::DefaultSmoothingTimeConstant  = 0.8;
+const double RealtimeAnalyser::DefaultMinDecibels = -100.0;
+const double RealtimeAnalyser::DefaultMaxDecibels = -30.0;
+
+const unsigned RealtimeAnalyser::DefaultFFTSize = 2048;
+const unsigned RealtimeAnalyser::MaxFFTSize = 2048;
+const unsigned RealtimeAnalyser::InputBufferSize = RealtimeAnalyser::DefaultFFTSize * 2;
+
</ins><span class="cx"> RealtimeAnalyser::RealtimeAnalyser()
</span><span class="cx">     : m_fftSize(DefaultFFTSize)
</span><span class="cx">     , m_inputBuffer(InputBufferSize)
</span><span class="cx">     , m_writeIndex(0)
</span><span class="cx">     , m_frequencyData(DefaultFFTSize / 2)
</span><del>-    , m_smoothingTimeConstant(0.8)
-    , m_minDecibels(-100.0)
-    , m_maxDecibels(-30.0)
</del><ins>+    , m_smoothingTimeConstant(DefaultSmoothingTimeConstant)
+    , m_minDecibels(DefaultMinDecibels)
+    , m_maxDecibels(DefaultMaxDecibels)
</ins><span class="cx"> {
</span><span class="cx">     m_analysisFrame = adoptPtr(new FFTFrame(DefaultFFTSize));
</span><span class="cx"> }
</span><span class="lines">@@ -66,7 +73,7 @@
</span><span class="cx">     m_inputBuffer.zero();
</span><span class="cx"> }
</span><span class="cx"> 
</span><del>-void RealtimeAnalyser::setFftSize(unsigned size)
</del><ins>+void RealtimeAnalyser::setFftSize(size_t size)
</ins><span class="cx"> {
</span><span class="cx">     if (m_fftSize != size) {
</span><span class="cx">         // FIXME: make thread safe
</span><span class="lines">@@ -108,26 +115,27 @@
</span><span class="cx"> 
</span><span class="cx">     m_writeIndex += framesToProcess;
</span><span class="cx"> 
</span><del>-    if (m_writeIndex == InputBufferSize) m_writeIndex = 0;
</del><ins>+    if (m_writeIndex == InputBufferSize)
+        m_writeIndex = 0;
</ins><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> void RealtimeAnalyser::doFFTAnalysis()
</span><span class="cx"> {    
</span><del>-    // Unroll the input buffer into a temp buffer, where we'll apply an analysis window followed by an FFT
</del><ins>+    // Unroll the input buffer into a temp buffer, where we'll apply an analysis window followed by an FFT.
</ins><span class="cx">     size_t fftSize = this-&gt;fftSize();
</span><span class="cx">     
</span><span class="cx">     AudioFloatArray tempBuffer(fftSize);
</span><span class="cx">     float* inputBufferP = m_inputBuffer.data();
</span><span class="cx">     float* tempP = tempBuffer.data();
</span><ins>+
</ins><span class="cx">     // FIXME : optimize with memcpy()
</span><del>-    for (unsigned i = 0; i &lt; fftSize; ++i) {
</del><ins>+    for (unsigned i = 0; i &lt; fftSize; ++i)
</ins><span class="cx">         tempP[i] = inputBufferP[(i + m_writeIndex - fftSize + InputBufferSize) % InputBufferSize];
</span><del>-    }
</del><span class="cx">     
</span><del>-    // First, window the input samples
</del><ins>+    // Window the input samples.
</ins><span class="cx">     ApplyWindow(tempP, fftSize);
</span><span class="cx">     
</span><del>-    // Do the analysis
</del><ins>+    // Do the analysis.
</ins><span class="cx">     m_analysisFrame-&gt;doFFT(tempP);
</span><span class="cx"> 
</span><span class="cx">     float* analysisDataP = frequencyData().data();
</span><span class="lines">@@ -137,15 +145,15 @@
</span><span class="cx">     float* realP = m_analysisFrame-&gt;realData();
</span><span class="cx">     float* imagP = m_analysisFrame-&gt;imagData();
</span><span class="cx"> 
</span><del>-    imagP[0] = 0.0f; // blow away packed nyquist component
</del><ins>+    // Blow away the packed nyquist component.
+    imagP[0] = 0.0f;
</ins><span class="cx">     
</span><span class="cx">     double k = m_smoothingTimeConstant;
</span><span class="cx">         
</span><del>-    // normalize so an input pure sine wave at 0dBfs registers as 0dBfs
-    // (undo FFT scaling factor)
</del><ins>+    // Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor).
</ins><span class="cx">     const double kMagnitudeScale = 1.0 / DefaultFFTSize;
</span><span class="cx">     
</span><del>-    // Convert from complex to magnitude
</del><ins>+    // Convert from complex to magnitude.
</ins><span class="cx">     for (unsigned i = 0; i &lt; n; ++i) {
</span><span class="cx">         Complex c(realP[i], imagP[i]);
</span><span class="cx"> 
</span><span class="lines">@@ -170,9 +178,9 @@
</span><span class="cx">         float* sourceP = const_cast&lt;RealtimeAnalyser*&gt;(this)-&gt;m_frequencyData.data();
</span><span class="cx">         float* data = array-&gt;data();
</span><span class="cx">         
</span><del>-        for (unsigned i = 0; i &lt; len; i++) {
</del><ins>+        for (unsigned i = 0; i &lt; len; ++i) {
</ins><span class="cx">             float value = sourceP[i];
</span><del>-            double dbMag = value == 0.0 ? -100.0 : 20.0 * log10(value);
</del><ins>+            double dbMag = !value ? -100.0 : 20.0 * log10(value);
</ins><span class="cx">             data[i] = float(dbMag);
</span><span class="cx">         }
</span><span class="cx">     }
</span><span class="lines">@@ -190,14 +198,14 @@
</span><span class="cx">         float* sourceP = const_cast&lt;RealtimeAnalyser*&gt;(this)-&gt;m_frequencyData.data();
</span><span class="cx">         unsigned char* data = array-&gt;data();
</span><span class="cx">         
</span><del>-        for (unsigned i = 0; i &lt; len; i++) {
</del><ins>+        for (unsigned i = 0; i &lt; len; ++i) {
</ins><span class="cx">             float value = sourceP[i];
</span><del>-            double dbMag = value == 0.0 ? -200.0 : 20.0 * log10(value);
</del><ins>+            double dbMag = !value ? -200.0 : 20.0 * log10(value);
</ins><span class="cx">             
</span><del>-            // now, scale for unsigned byte value
</del><ins>+            // Scale for unsigned byte value.
</ins><span class="cx">             double scaledValue = 255.0 * (dbMag - m_minDecibels) / (m_maxDecibels - m_minDecibels);
</span><span class="cx"> 
</span><del>-            // clip
</del><ins>+            // Clip to valid range.
</ins><span class="cx">             if (scaledValue &lt; 0.0)
</span><span class="cx">                 scaledValue = 0.0;
</span><span class="cx">             if (scaledValue &gt; 255.0)
</span><span class="lines">@@ -213,19 +221,19 @@
</span><span class="cx">     if (!array)
</span><span class="cx">         return;
</span><span class="cx">         
</span><del>-    unsigned int fftSize = this-&gt;fftSize();
</del><ins>+    unsigned fftSize = this-&gt;fftSize();
</ins><span class="cx">     size_t len = min(fftSize, array-&gt;length());
</span><span class="cx">     if (len &gt; 0) {
</span><span class="cx">         float* inputBufferP = const_cast&lt;RealtimeAnalyser*&gt;(this)-&gt;m_inputBuffer.data();
</span><span class="cx">         unsigned char* data = array-&gt;data();
</span><span class="cx">         
</span><del>-        for (unsigned i = 0; i &lt; len; i++) {
</del><ins>+        for (unsigned i = 0; i &lt; len; ++i) {
</ins><span class="cx">             float value = inputBufferP[(i + m_writeIndex - fftSize + InputBufferSize) % InputBufferSize];
</span><span class="cx"> 
</span><span class="cx">             // Scale from nominal -1.0 -&gt; +1.0 to unsigned byte.
</span><span class="cx">             double scaledValue = 128.0 * (value + 1.0);
</span><span class="cx"> 
</span><del>-            // clip
</del><ins>+            // Clip to valid range.
</ins><span class="cx">             if (scaledValue &lt; 0.0)
</span><span class="cx">                 scaledValue = 0.0;
</span><span class="cx">             if (scaledValue &gt; 255.0)
</span><span class="lines">@@ -238,6 +246,6 @@
</span><span class="cx"> 
</span><span class="cx"> #endif // 3D_CANVAS
</span><span class="cx"> 
</span><del>-}  // namespace WebCore
</del><ins>+} // namespace WebCore
</ins><span class="cx"> 
</span><span class="cx"> #endif // ENABLE(WEB_AUDIO)
</span></span></pre></div>
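<p>getByteFrequencyData() above converts each FFT magnitude to decibels (20 * log10(value), with a floor for zero) and then maps the configured [minDecibels, maxDecibels] range onto 0..255 with clipping. A standalone sketch of that mapping, using the patch's default range of -100 dB to -30 dB (the helper name and sample magnitudes are made up):</p>
<pre>
#include &lt;cmath&gt;
#include &lt;cstdio&gt;

// Mirrors the magnitude -&gt; decibel -&gt; unsigned byte conversion in getByteFrequencyData().
static unsigned char magnitudeToByte(float value, double minDecibels, double maxDecibels)
{
    double dbMag = !value ? -200.0 : 20.0 * std::log10(value);

    // Scale for unsigned byte value.
    double scaledValue = 255.0 * (dbMag - minDecibels) / (maxDecibels - minDecibels);

    // Clip to valid range.
    if (scaledValue &lt; 0.0)
        scaledValue = 0.0;
    if (scaledValue &gt; 255.0)
        scaledValue = 255.0;

    return static_cast&lt;unsigned char&gt;(scaledValue);
}

int main()
{
    const double minDecibels = -100.0;
    const double maxDecibels = -30.0;
    const float magnitudes[] = { 0.0f, 0.00001f, 0.001f, 0.0316f, 1.0f };

    for (float m : magnitudes)
        std::printf(&quot;magnitude %.5f -&gt; byte %d\n&quot;, m, magnitudeToByte(m, minDecibels, maxDecibels));
    return 0;
}
</pre>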
<a id="branchesaudioWebCorewebaudioRealtimeAnalyserh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/RealtimeAnalyser.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/RealtimeAnalyser.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/RealtimeAnalyser.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -26,10 +26,8 @@
</span><span class="cx"> #define RealtimeAnalyser_h
</span><span class="cx"> 
</span><span class="cx"> #include &quot;AudioArray.h&quot;
</span><del>-
</del><ins>+#include &lt;wtf/NonCopyable.h&gt;
</ins><span class="cx"> #include &lt;wtf/OwnPtr.h&gt;
</span><del>-#include &lt;wtf/PassRefPtr.h&gt;
-#include &lt;wtf/RefCounted.h&gt;
</del><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span><span class="lines">@@ -43,50 +41,49 @@
</span><span class="cx"> 
</span><span class="cx"> class RealtimeAnalyser : public Noncopyable {
</span><span class="cx"> public:
</span><del>-    enum { DefaultFFTSize = 2048 };
-    enum { MaxFFTSize = 2048 };
-    enum { InputBufferSize = DefaultFFTSize * 2 };
-    
</del><span class="cx">     RealtimeAnalyser();
</span><span class="cx">     virtual ~RealtimeAnalyser();
</span><span class="cx">     
</span><span class="cx">     void reset();
</span><span class="cx"> 
</span><del>-    // Javascript bindings
-    unsigned fftSize() const { return m_fftSize; }
-    void setFftSize(unsigned size);
</del><ins>+    size_t fftSize() const { return m_fftSize; }
+    void setFftSize(size_t size);
</ins><span class="cx"> 
</span><span class="cx">     unsigned frequencyBinCount() const { return m_fftSize / 2; }
</span><span class="cx"> 
</span><span class="cx">     void setMinDecibels(float k) { m_minDecibels = k; }
</span><del>-    float minDecibels() const { return (float)m_minDecibels; }
</del><ins>+    float minDecibels() const { return static_cast&lt;float&gt;(m_minDecibels); }
</ins><span class="cx"> 
</span><span class="cx">     void setMaxDecibels(float k) { m_maxDecibels = k; }
</span><del>-    float maxDecibels() const { return (float)m_maxDecibels; }
</del><ins>+    float maxDecibels() const { return static_cast&lt;float&gt;(m_maxDecibels); }
</ins><span class="cx"> 
</span><span class="cx">     void setSmoothingTimeConstant(float k) { m_smoothingTimeConstant = k; }
</span><del>-    float smoothingTimeConstant() const { return (float)m_smoothingTimeConstant; }
</del><ins>+    float smoothingTimeConstant() const { return static_cast&lt;float&gt;(m_smoothingTimeConstant); }
</ins><span class="cx"> 
</span><span class="cx"> #if ENABLE(3D_CANVAS)
</span><del>-    // var freqData = new Float32Array(2048);
-    void getFloatFrequencyData(Float32Array* array);
-    void getByteFrequencyData(Uint8Array* array);
-    void getByteTimeDomainData(Uint8Array* array);
</del><ins>+    void getFloatFrequencyData(Float32Array*);
+    void getByteFrequencyData(Uint8Array*);
+    void getByteTimeDomainData(Uint8Array*);
</ins><span class="cx"> #endif
</span><span class="cx"> 
</span><del>-    void writeInput(AudioBus* bus, size_t framesToProcess);
</del><ins>+    void writeInput(AudioBus*, size_t framesToProcess);
</ins><span class="cx">     void doFFTAnalysis();
</span><span class="cx"> 
</span><span class="cx">     AudioFloatArray&amp; frequencyData() { return m_frequencyData; }
</span><span class="cx"> 
</span><del>-    void ref() { };
-    void deref() { };
</del><ins>+    static const double DefaultSmoothingTimeConstant;
+    static const double DefaultMinDecibels;
+    static const double DefaultMaxDecibels;
</ins><span class="cx"> 
</span><ins>+    static const unsigned DefaultFFTSize;
+    static const unsigned MaxFFTSize;
+    static const unsigned InputBufferSize;
+
</ins><span class="cx"> private:
</span><del>-    unsigned m_fftSize;
</del><ins>+    size_t m_fftSize;
</ins><span class="cx">     AudioFloatArray m_inputBuffer;
</span><del>-    int m_writeIndex;
</del><span class="cx">     OwnPtr&lt;FFTFrame&gt; m_analysisFrame;
</span><ins>+    unsigned m_writeIndex;
</ins><span class="cx">     
</span><span class="cx">     AudioFloatArray m_frequencyData;
</span><span class="cx">     double m_smoothingTimeConstant;
</span><span class="lines">@@ -95,6 +92,6 @@
</span><span class="cx">     double m_maxDecibels;
</span><span class="cx"> };
</span><span class="cx"> 
</span><del>-}  // namespace WebCore
</del><ins>+} // namespace WebCore
</ins><span class="cx"> 
</span><del>-#endif  // RealtimeAnalyser_h
</del><ins>+#endif // RealtimeAnalyser_h
</ins></span></pre></div>
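<p>The header now declares the analyser defaults as static const members rather than enum values. Because non-integral static const data members (the doubles) cannot be given a value inside the class in C++03, the header carries only declarations and the values live in RealtimeAnalyser.cpp, as the .cpp hunk above shows. A compact illustration of that declaration/definition split (SomeAnalyser is a made-up name):</p>
<pre>
#include &lt;cstdio&gt;

// Header part: declarations only.
class SomeAnalyser {
public:
    static const double DefaultSmoothingTimeConstant;
    static const unsigned DefaultFFTSize;
};

// .cpp part: out-of-class definitions carrying the values.
const double SomeAnalyser::DefaultSmoothingTimeConstant = 0.8;
const unsigned SomeAnalyser::DefaultFFTSize = 2048;

int main()
{
    std::printf(&quot;smoothing %.1f, fft size %u\n&quot;,
                SomeAnalyser::DefaultSmoothingTimeConstant, SomeAnalyser::DefaultFFTSize);
    return 0;
}
</pre>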
<a id="branchesaudioWebCorewebaudioRealtimeAnalyserNodecpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/RealtimeAnalyserNode.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/RealtimeAnalyserNode.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/RealtimeAnalyserNode.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -51,29 +51,25 @@
</span><span class="cx"> 
</span><span class="cx"> void RealtimeAnalyserNode::process(size_t framesToProcess)
</span><span class="cx"> {
</span><del>-    AudioBus* destination = output(0)-&gt;bus();
</del><ins>+    AudioBus* outputBus = output(0)-&gt;bus();
</ins><span class="cx"> 
</span><span class="cx">     if (!isInitialized() || !input(0)-&gt;isConnected()) {
</span><del>-        destination-&gt;zero();
</del><ins>+        outputBus-&gt;zero();
</ins><span class="cx">         return;
</span><span class="cx">     }
</span><span class="cx"> 
</span><del>-    AudioBus* source = input(0)-&gt;bus();
</del><ins>+    AudioBus* inputBus = input(0)-&gt;bus();
</ins><span class="cx">     
</span><del>-    m_analyser.writeInput(source, framesToProcess);
</del><ins>+    m_analyser.writeInput(inputBus, framesToProcess);
</ins><span class="cx"> 
</span><del>-    // Our override of pullInputs() should just pass the audio data through unchanged if the
-    // channel count matches from input to output (resulting in source == destination), otherwise, for now, at least handle mono -&gt; stereo.
-    // m_passThroughProcessor should be optimized to do the fastest thing for each case.
-    m_passThroughProcessor.pan(0.0, 0.0, source, destination, framesToProcess);
</del><ins>+    // For in-place processing, our override of pullInputs() should just pass the audio data through unchanged if the channel count matches from input to output
+    // (resulting in inputBus == outputBus).  Otherwise, do an up-mix to stereo.
+    if (inputBus != outputBus)
+        outputBus-&gt;copyFrom(*inputBus);
</ins><span class="cx"> }
</span><span class="cx"> 
</span><del>-void RealtimeAnalyserNode::reset()
-{
-    m_analyser.reset();
-}
-
-// Nice optimization allowing this node to operate as simply a pass-through (no processing) from input to output.
</del><ins>+// We override pullInputs() as an optimization allowing this node to take advantage of in-place processing,
+// where the input is simply passed through unprocessed to the output.
</ins><span class="cx"> // Note: this only applies if the input and output channel counts match.
</span><span class="cx"> void RealtimeAnalyserNode::pullInputs(size_t framesToProcess)
</span><span class="cx"> {
</span><span class="lines">@@ -81,6 +77,11 @@
</span><span class="cx">     input(0)-&gt;pull(output(0)-&gt;bus(), framesToProcess);
</span><span class="cx"> }
</span><span class="cx"> 
</span><ins>+void RealtimeAnalyserNode::reset()
+{
+    m_analyser.reset();
+}
+
</ins><span class="cx"> } // namespace WebCore
</span><span class="cx"> 
</span><span class="cx"> #endif // ENABLE(WEB_AUDIO)
</span></span></pre></div>
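<p>pullInputs() above lets the upstream node render straight into this node's output bus, so in the common case the analyser just observes the data and passes it through untouched; process() only copies when the input and output are genuinely different busses (for example, when the channel counts do not match). A tiny sketch of that in-place pass-through idea, with a hypothetical Bus type standing in for AudioBus:</p>
<pre>
#include &lt;cstdio&gt;
#include &lt;vector&gt;

struct Bus {
    explicit Bus(size_t frames) : samples(frames) { }
    void copyFrom(const Bus&amp; other) { samples = other.samples; }
    std::vector&lt;float&gt; samples;
};

// The analyser would record inputBus here, then pass the audio along.
static void process(Bus* inputBus, Bus* outputBus)
{
    if (inputBus != outputBus) // not rendered in place (e.g. channel counts differed)
        outputBus-&gt;copyFrom(*inputBus);
}

int main()
{
    Bus shared(128);
    process(&amp;shared, &amp;shared); // in-place: nothing to copy
    Bus in(128), out(128);
    process(&amp;in, &amp;out);        // separate busses: explicit copy
    std::printf(&quot;done\n&quot;);
    return 0;
}
</pre>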
<a id="branchesaudioWebCorewebaudioRealtimeAnalyserNodeh"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/RealtimeAnalyserNode.h (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/RealtimeAnalyserNode.h        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/RealtimeAnalyserNode.h        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -26,7 +26,6 @@
</span><span class="cx"> #define RealtimeAnalyserNode_h
</span><span class="cx"> 
</span><span class="cx"> #include &quot;AudioNode.h&quot;
</span><del>-#include &quot;PassThroughPanner.h&quot;
</del><span class="cx"> #include &quot;RealtimeAnalyser.h&quot;
</span><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="lines">@@ -42,10 +41,9 @@
</span><span class="cx">     
</span><span class="cx">     // AudioNode
</span><span class="cx">     virtual void process(size_t framesToProcess);
</span><ins>+    virtual void pullInputs(size_t framesToProcess);
</ins><span class="cx">     virtual void reset();
</span><span class="cx"> 
</span><del>-    virtual void pullInputs(size_t framesToProcess);
-
</del><span class="cx">     // Javascript bindings
</span><span class="cx">     unsigned int fftSize() const { return m_analyser.fftSize(); }
</span><span class="cx">     void setFftSize(unsigned int size) { m_analyser.setFftSize(size); }
</span><span class="lines">@@ -71,7 +69,6 @@
</span><span class="cx">     RealtimeAnalyserNode(AudioContext*, double sampleRate);
</span><span class="cx"> 
</span><span class="cx">     RealtimeAnalyser m_analyser;
</span><del>-    PassThroughPanner m_passThroughProcessor;
</del><span class="cx"> };
</span><span class="cx"> 
</span><span class="cx"> } // namespace WebCore
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioRealtimeAnalyserNodeidl"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/RealtimeAnalyserNode.idl (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/RealtimeAnalyserNode.idl        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/RealtimeAnalyserNode.idl        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -30,18 +30,15 @@
</span><span class="cx">         attribute unsigned long fftSize;
</span><span class="cx">         readonly attribute unsigned long frequencyBinCount;
</span><span class="cx"> 
</span><del>-        // minDecibels / maxDecibels represent the range in which to scale the FFT analysis data
-        // for conversion to unsigned byte values
</del><ins>+        // minDecibels / maxDecibels represent the range to scale the FFT analysis data for conversion to unsigned byte values.
</ins><span class="cx">         attribute float minDecibels;
</span><span class="cx">         attribute float maxDecibels;
</span><span class="cx">         
</span><del>-        // A value from 0.0 -&gt; 1.0 where 0.0 represents no time averaging with the last
-        // analysis frame
</del><ins>+        // A value from 0.0 -&gt; 1.0 where 0.0 represents no time averaging with the last analysis frame.
</ins><span class="cx">         attribute float smoothingTimeConstant;
</span><span class="cx">         
</span><span class="cx">         // Copies the current frequency data into the passed array.
</span><del>-        // If the array has fewer elements than the frequencyBinCount,
-        // the excess elements will be dropped.
</del><ins>+        // If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped.
</ins><span class="cx">         [Conditional=3D_CANVAS] void getFloatFrequencyData(in Float32Array array);
</span><span class="cx">         [Conditional=3D_CANVAS] void getByteFrequencyData(in Uint8Array array);
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioV8AudioContextCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/V8AudioContextCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/V8AudioContextCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/V8AudioContextCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -29,24 +29,8 @@
</span><span class="cx"> #include &quot;V8AudioContext.h&quot;
</span><span class="cx"> 
</span><span class="cx"> #include &quot;AudioContext.h&quot;
</span><del>-#include &quot;AudioNode.h&quot;
-#include &quot;V8AudioGainNode.h&quot;
-#include &quot;V8AudioNode.h&quot;
-#include &quot;V8LowPass2FilterNode.h&quot;
-#include &quot;V8HighPass2FilterNode.h&quot;
-#include &quot;V8AudioPannerNode.h&quot;
-#include &quot;V8ConvolverNode.h&quot;
-#include &quot;V8RealtimeAnalyserNode.h&quot;
-
-#include &quot;AudioGainNode.h&quot;
-#include &quot;LowPass2FilterNode.h&quot;
-#include &quot;HighPass2FilterNode.h&quot;
-#include &quot;AudioPannerNode.h&quot;
-#include &quot;ConvolverNode.h&quot;
-#include &quot;RealtimeAnalyserNode.h&quot;
-
-#include &quot;V8Proxy.h&quot;
</del><span class="cx"> #include &quot;Frame.h&quot;
</span><ins>+#include &quot;V8Proxy.h&quot;
</ins><span class="cx"> 
</span><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioV8AudioMiscCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/V8AudioMiscCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/V8AudioMiscCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/V8AudioMiscCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -26,16 +26,13 @@
</span><span class="cx"> 
</span><span class="cx"> #if ENABLE(WEB_AUDIO)
</span><span class="cx"> 
</span><del>-#include &quot;V8AudioNode.h&quot;
-
-#include &quot;AudioBufferSourceNode.h&quot;
</del><span class="cx"> #include &quot;AudioBuffer.h&quot;
</span><ins>+#include &quot;AudioBufferSourceNode.h&quot;
</ins><span class="cx"> #include &quot;AudioListener.h&quot;
</span><span class="cx"> #include &quot;ConvolverNode.h&quot;
</span><del>-
-#include &quot;V8AudioBufferSourceNode.h&quot;
</del><span class="cx"> #include &quot;V8AudioBuffer.h&quot;
</span><del>-#include &quot;V8AudioListener.h&quot;
</del><ins>+#include &quot;V8AudioBufferSourceNode.h&quot;
+#include &quot;V8AudioNode.h&quot;
</ins><span class="cx"> #include &quot;V8ConvolverNode.h&quot;
</span><span class="cx"> 
</span><span class="cx"> using namespace v8;
</span></span></pre></div>
<a id="branchesaudioWebCorewebaudioV8AudioNodeCustomcpp"></a>
<div class="modfile"><h4>Modified: branches/audio/WebCore/webaudio/V8AudioNodeCustom.cpp (70588 => 70589)</h4>
<pre class="diff"><span>
<span class="info">--- branches/audio/WebCore/webaudio/V8AudioNodeCustom.cpp        2010-10-26 23:39:25 UTC (rev 70588)
+++ branches/audio/WebCore/webaudio/V8AudioNodeCustom.cpp        2010-10-26 23:45:21 UTC (rev 70589)
</span><span class="lines">@@ -29,52 +29,63 @@
</span><span class="cx"> #include &quot;V8AudioNode.h&quot;
</span><span class="cx"> 
</span><span class="cx"> #include &quot;AudioNode.h&quot;
</span><del>-
</del><span class="cx"> #include &quot;ExceptionCode.h&quot;
</span><span class="cx"> #include &quot;NotImplemented.h&quot;
</span><del>-#include &lt;wtf/FastMalloc.h&gt;
</del><span class="cx"> #include &quot;V8Binding.h&quot;
</span><span class="cx"> #include &quot;V8Proxy.h&quot;
</span><span class="cx"> 
</span><del>-
</del><span class="cx"> namespace WebCore {
</span><span class="cx"> 
</span><span class="cx"> v8::Handle&lt;v8::Value&gt; V8AudioNode::connectCallback(const v8::Arguments&amp; args)
</span><del>-{
-    AudioNode* audioNode = toNative(args.Holder());
</del><ins>+{    
+    if (args.Length() &lt; 1)
+        return throwError(&quot;Not enough arguments&quot;, V8Proxy::SyntaxError);
</ins><span class="cx"> 
</span><del>-    unsigned output = 0;
-    unsigned input = 0;
</del><ins>+    if (args.Length() &gt; 3)
+        return throwError(&quot;Too many arguments&quot;, V8Proxy::SyntaxError);
</ins><span class="cx">     
</span><del>-    if (args.Length() &lt; 1)
-        return v8::Undefined(); // FIXME: should throw exception
-    
</del><span class="cx">     AudioNode* destinationNode = toNative(args[0]-&gt;ToObject());
</span><ins>+    if (!destinationNode)
+        return throwError(&quot;Invalid destination node&quot;, V8Proxy::SyntaxError);
</ins><span class="cx">     
</span><ins>+    unsigned output = 0;
+    unsigned input = 0;
</ins><span class="cx">     bool ok = false;
</span><span class="cx">     if (args.Length() &gt; 1)
</span><span class="cx">         output = toInt32(args[1], ok);
</span><span class="cx"> 
</span><span class="cx">     if (args.Length() &gt; 2)
</span><span class="cx">         input = toInt32(args[2], ok);
</span><ins>+        
+    if (!ok)
+        return throwError(&quot;Invalid index parameters&quot;, V8Proxy::SyntaxError);
</ins><span class="cx"> 
</span><del>-    audioNode-&gt;connect(destinationNode, output, input);
</del><ins>+    AudioNode* audioNode = toNative(args.Holder());
+    bool success = audioNode-&gt;connect(destinationNode, output, input);
+    if (!success)
+        return throwError(&quot;Invalid index parameter&quot;, V8Proxy::SyntaxError);
+
</ins><span class="cx">     return v8::Undefined();
</span><span class="cx"> }
</span><span class="cx"> 
</span><span class="cx"> v8::Handle&lt;v8::Value&gt; V8AudioNode::disconnectCallback(const v8::Arguments&amp; args)
</span><del>-{
-    AudioNode* audioNode = toNative(args.Holder());
-    unsigned output = 0;
-    
</del><ins>+{    
</ins><span class="cx">     if (args.Length() &gt; 1)
</span><del>-        return v8::Undefined(); // FIXME: should throw exception
</del><ins>+        return throwError(&quot;Too many arguments&quot;, V8Proxy::SyntaxError);
</ins><span class="cx">     
</span><ins>+    unsigned output = 0;
</ins><span class="cx">     bool ok = false;
</span><span class="cx">     if (args.Length() &gt; 0)
</span><span class="cx">         output = toInt32(args[0], ok);
</span><span class="cx"> 
</span><del>-    audioNode-&gt;disconnect(output);
</del><ins>+    if (!ok)
+        return throwError(&quot;Invalid index parameters&quot;, V8Proxy::SyntaxError);
+
+    AudioNode* audioNode = toNative(args.Holder());
+    bool success = audioNode-&gt;disconnect(output);
+    if (!success)
+        return throwError(&quot;Invalid index parameter&quot;, V8Proxy::SyntaxError);
+
</ins><span class="cx">     return v8::Undefined();
</span><span class="cx"> }
</span><span class="cx"> 
</span></span></pre>
</div>
</div>

</body>
</html>