This repository has been archived by the owner on Jul 18, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 12
/
utils.js
116 lines (110 loc) · 3.65 KB
/
utils.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/**
* Copyright 2015 IBM Corp. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// For non-view logic
var $ = require('jquery');
// Read one slice of `_file` into an ArrayBuffer.
// `_offset` is where the slice starts, `length` how many bytes it spans;
// `readChunk` receives the FileReader load event once the read completes.
var fileBlock = function(_offset, length, _file, readChunk) {
  var reader = new FileReader();
  var chunk = _file.slice(_offset, _offset + length);
  reader.onload = readChunk;
  reader.readAsArrayBuffer(chunk);
};
// Based on alediaferia's SO response
// http://stackoverflow.com/questions/14438187/javascript-filereader-parsing-long-file-in-chunks
// Streams `options.file` to `ondata` in fixed-size chunks via FileReader.
//
// Params:
//   options      - { file: File, bufferSize?: number }; bufferSize is the
//                  chunk size in bytes (defaults to 16000).
//   ondata       - called with each chunk's ArrayBuffer.
//   running      - predicate polled on every load event; a falsy return
//                  silently stops the loop (neither onend nor onerror fires).
//   onerror      - called with the FileReader error if a read fails.
//   onend        - called once the whole file has been consumed.
//   samplingRate - optional; when set, each follow-up read is delayed so the
//                  upload pace roughly matches audio playback speed.
exports.onFileProgress = function(options, ondata, running, onerror, onend, samplingRate) {
var file = options.file;
var fileSize = file.size;
var chunkSize = options.bufferSize || 16000; // in bytes
var offset = 0;
// onload handler that re-arms itself via fileBlock until the whole file
// has been read — i.e. a self-continuing chunked read loop.
var readChunk = function(evt) {
if (offset >= fileSize) {
console.log('Done reading file');
onend();
return;
}
// The caller can abort the stream by making running() return false.
if (!running()) {
return;
}
if (evt.target.error == null) {
var buffer = evt.target.result;
var len = buffer.byteLength;
// Advance by the bytes actually read (the final chunk may be short).
offset += len;
// console.log('sending: ' + len);
ondata(buffer); // callback for handling read chunk
} else {
var errorMessage = evt.target.error;
console.log('Read error: ' + errorMessage);
onerror(errorMessage);
return;
}
// use this timeout to pace the data upload for the playSample case,
// the idea is that the hyps do not arrive before the audio is played back
if (samplingRate) {
// console.log('samplingRate: ' +
// samplingRate + ' timeout: ' + (chunkSize * 1000) / (samplingRate * 2));
// Delay = ms needed to play chunkSize bytes at samplingRate; the
// divide-by-2 presumably assumes 2 bytes per sample (16-bit PCM) — confirm.
setTimeout(function() {
fileBlock(offset, chunkSize, file, readChunk);
}, (chunkSize * 1000) / (samplingRate * 2));
} else {
fileBlock(offset, chunkSize, file, readChunk);
}
};
// Kick off the first read; subsequent reads are chained from readChunk.
fileBlock(offset, chunkSize, file, readChunk);
};
exports.createTokenGenerator = function() {
// Make call to API to try and get token
var hasBeenRunTimes = 0;
return {
getToken: function(callback) {
++hasBeenRunTimes;
if (hasBeenRunTimes > 5) {
var err = new Error('Cannot reach server');
callback(null, err);
return;
}
var url = '/api/token';
var tokenRequest = new XMLHttpRequest();
tokenRequest.open('POST', url, true);
tokenRequest.setRequestHeader('csrf-token',$('meta[name="ct"]').attr('content'));
tokenRequest.onreadystatechange = function() {
if (tokenRequest.readyState === 4) {
if (tokenRequest.status === 200) {
var token = tokenRequest.responseText;
callback(null, token);
} else {
var error = 'Cannot reach server';
if (tokenRequest.responseText){
try {
error = JSON.parse(tokenRequest.responseText);
} catch (e) {
error = tokenRequest.responseText;
}
}
callback(error);
}
}
};
tokenRequest.send();
},
getCount: function() { return hasBeenRunTimes; }
};
};
exports.initPubSub = function() {
var o = $({});
$.subscribe = o.on.bind(o);
$.unsubscribe = o.off.bind(o);
$.publish = o.trigger.bind(o);
};