Anki audio download (#477)

* Update how audio is added to Anki cards

* Upgrade Anki templates

* Update comments
toasted-nutbread 2020-05-02 12:50:16 -04:00 committed by GitHub
parent 08ada6844a
commit cae6b657ab
5 changed files with 113 additions and 37 deletions

View File

@@ -14,7 +14,9 @@
{{~/if~}}
{{/inline}}
{{#*inline "audio"}}{{/inline}}
{{#*inline "audio"~}}
[sound:{{definition.audioFileName}}]
{{~/inline}}
{{#*inline "character"}}
{{~definition.character~}}

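The previously empty "audio" inline partial now renders an Anki [sound:...] tag pointing at the media file that the note builder stores. A minimal sketch of the rendered result, assuming a standalone Handlebars environment and a hypothetical file name:

// Minimal sketch; the file name is hypothetical and the partial is normally
// rendered as part of the full field template, not on its own.
const Handlebars = require('handlebars');
const render = Handlebars.compile(
    '{{#*inline "audio"~}}\n    [sound:{{definition.audioFileName}}]\n{{~/inline}}{{> audio}}'
);
console.log(render({definition: {audioFileName: 'yomichan_audio_example.mp3'}}));
// -> [sound:yomichan_audio_example.mp3]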
View File

@@ -42,25 +42,6 @@ class AnkiNoteBuilder {
note.fields[fieldName] = await this.formatField(fieldValue, definition, mode, context, options, templates, null);
}
if (!isKanji && definition.audio) {
const audioFields = [];
for (const [fieldName, fieldValue] of modeOptionsFieldEntries) {
if (fieldValue.includes('{audio}')) {
audioFields.push(fieldName);
}
}
if (audioFields.length > 0) {
note.audio = {
url: definition.audio.url,
filename: definition.audio.filename,
skipHash: '7e2c2f954ef6051373ba916f000168dc', // hash of audio data that should be skipped
fields: audioFields
};
}
}
return note;
}
@@ -88,18 +69,31 @@ class AnkiNoteBuilder {
});
}
async injectAudio(definition, fields, sources, details) {
async injectAudio(definition, fields, sources, customSourceUrl) {
if (!this._containsMarker(fields, 'audio')) { return; }
try {
const expressions = definition.expressions;
const audioSourceDefinition = Array.isArray(expressions) ? expressions[0] : definition;
const {uri} = await this._audioSystem.getDefinitionAudio(audioSourceDefinition, sources, details);
const filename = this._createInjectedAudioFileName(audioSourceDefinition);
if (filename !== null) {
definition.audio = {url: uri, filename};
if (filename === null) { return; }
const {audio} = await this._audioSystem.getDefinitionAudio(
audioSourceDefinition,
sources,
{
textToSpeechVoice: null,
customSourceUrl,
binary: true,
disableCache: true
}
);
const data = AnkiNoteBuilder.arrayBufferToBase64(audio);
await this._anki.storeMediaFile(filename, data);
definition.audioFileName = filename;
} catch (e) {
// NOP
}
@@ -129,6 +123,7 @@ class AnkiNoteBuilder {
if (reading) { filename += `_${reading}`; }
if (expression) { filename += `_${expression}`; }
filename += '.mp3';
filename = filename.replace(/\]/g, ''); // a ']' in the filename would terminate the [sound:...] tag early
return filename;
}
@@ -152,6 +147,10 @@ class AnkiNoteBuilder {
return false;
}
static arrayBufferToBase64(arrayBuffer) {
return window.btoa(String.fromCharCode(...new Uint8Array(arrayBuffer)));
}
static stringReplaceAsync(str, regex, replacer) {
let match;
let index = 0;

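Rather than attaching a note.audio descriptor (URL, filename, skipHash) for Anki to download itself, the builder now fetches the audio, base64-encodes it with arrayBufferToBase64, and pushes it to Anki via this._anki.storeMediaFile(filename, data); the card template then only needs the stored file name (definition.audioFileName). As a rough sketch, assuming the backing API is AnkiConnect, the store call amounts to a request like the following (the endpoint URL and version number are assumptions):

// Rough sketch of a storeMediaFile request against AnkiConnect;
// the endpoint URL and API version are assumptions.
async function storeMediaFile(filename, data) {
    const response = await fetch('http://127.0.0.1:8765', {
        method: 'POST',
        body: JSON.stringify({
            action: 'storeMediaFile',
            version: 6,
            params: {filename, data} // data: base64-encoded audio
        })
    });
    return (await response.json()).result;
}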
View File

@@ -507,7 +507,7 @@ class Backend {
definition,
options.anki.terms.fields,
options.audio.sources,
{textToSpeechVoice: null, customSourceUrl}
customSourceUrl
);
}

View File

@@ -108,6 +108,25 @@ const profileOptionsVersionUpdates = [
fieldTemplates += '\n\n{{#*inline "document-title"}}\n {{~context.document.title~}}\n{{/inline}}';
options.anki.fieldTemplates = fieldTemplates;
}
},
(options) => {
// Version 14 changes:
// Changed template for Anki audio.
let fieldTemplates = options.anki.fieldTemplates;
if (typeof fieldTemplates !== 'string') { return; }
const replacement = '{{#*inline "audio"~}}\n [sound:{{definition.audioFileName}}]\n{{~/inline}}';
let replaced = false;
fieldTemplates = fieldTemplates.replace(/\{\{#\*inline "audio"\}\}\{\{\/inline\}\}/g, () => {
replaced = true;
return replacement;
});
if (!replaced) {
fieldTemplates += '\n\n' + replacement;
}
options.anki.fieldTemplates = fieldTemplates;
}
];

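The version 14 migration swaps any empty "audio" inline in a user's saved field templates for the new [sound:...] form; the replacer callback doubles as a found-flag so the block is appended only when no empty definition exists. A condensed, self-contained restatement for illustration (upgradeAudioInline is a hypothetical helper; the real update mutates options.anki.fieldTemplates):

// Hypothetical helper restating the version 14 template upgrade.
function upgradeAudioInline(fieldTemplates) {
    const replacement = '{{#*inline "audio"~}}\n    [sound:{{definition.audioFileName}}]\n{{~/inline}}';
    let replaced = false;
    fieldTemplates = fieldTemplates.replace(/\{\{#\*inline "audio"\}\}\{\{\/inline\}\}/g, () => {
        replaced = true;
        return replacement;
    });
    return replaced ? fieldTemplates : `${fieldTemplates}\n\n${replacement}`;
}

console.log(upgradeAudioInline('{{#*inline "audio"}}{{/inline}}'));
// -> {{#*inline "audio"~}}
//        [sound:{{definition.audioFileName}}]
//    {{~/inline}}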
View File

@@ -79,7 +79,7 @@ class AudioSystem {
async getDefinitionAudio(definition, sources, details) {
const key = `${definition.expression}:${definition.reading}`;
const hasCache = (this._cache !== null);
const hasCache = (this._cache !== null && !details.disableCache);
if (hasCache) {
const cacheValue = this._cache.get(key);
@@ -98,7 +98,11 @@ class AudioSystem {
if (uri === null) { continue; }
try {
const audio = await this._createAudio(uri);
const audio = (
details.binary ?
await this._createAudioBinary(uri) :
await this._createAudio(uri)
);
if (hasCache) {
this._cacheCheck();
this._cache.set(key, {audio, uri, source});
@@ -124,6 +128,14 @@ class AudioSystem {
// NOP
}
_getAudioUri(definition, source, details) {
return (
this._audioUriBuilder !== null ?
this._audioUriBuilder.getUri(definition, source, details) :
null
);
}
async _createAudio(uri) {
const ttsParameters = this._getTextToSpeechParameters(uri);
if (ttsParameters !== null) {
@@ -134,21 +146,20 @@ class AudioSystem {
return await this._createAudioFromUrl(uri);
}
_getAudioUri(definition, source, details) {
return (
this._audioUriBuilder !== null ?
this._audioUriBuilder.getUri(definition, source, details) :
null
);
async _createAudioBinary(uri) {
const ttsParameters = this._getTextToSpeechParameters(uri);
if (ttsParameters !== null) {
throw new Error('Cannot create audio from text-to-speech');
}
return await this._createAudioBinaryFromUrl(uri);
}
_createAudioFromUrl(url) {
return new Promise((resolve, reject) => {
const audio = new Audio(url);
audio.addEventListener('loadeddata', () => {
const duration = audio.duration;
if (duration === 5.694694 || duration === 5.720718) {
// Hardcoded values for invalid audio
if (!this._isAudioValid(audio)) {
reject(new Error('Could not retrieve audio'));
} else {
resolve(audio);
@@ -158,6 +169,42 @@ class AudioSystem {
});
}
_createAudioBinaryFromUrl(url) {
return new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.responseType = 'arraybuffer';
xhr.addEventListener('load', async () => {
const arrayBuffer = xhr.response;
if (!await this._isAudioBinaryValid(arrayBuffer)) {
reject(new Error('Could not retrieve audio'));
} else {
resolve(arrayBuffer);
}
});
xhr.addEventListener('error', () => reject(new Error('Failed to connect')));
xhr.open('GET', url);
xhr.send();
});
}
_isAudioValid(audio) {
const duration = audio.duration;
return (
duration !== 5.694694 && // jpod101 invalid audio (Chrome)
duration !== 5.720718 // jpod101 invalid audio (Firefox)
);
}
async _isAudioBinaryValid(arrayBuffer) {
const digest = await AudioSystem.arrayBufferDigest(arrayBuffer);
switch (digest) {
case 'ae6398b5a27bc8c0a771df6c907ade794be15518174773c58c7c7ddd17098906': // jpod101 invalid audio
return false;
default:
return true;
}
}
_getTextToSpeechVoiceFromVoiceUri(voiceUri) {
try {
for (const voice of speechSynthesis.getVoices()) {
@@ -195,4 +242,13 @@ class AudioSystem {
this._cache.delete(key);
}
}
static async arrayBufferDigest(arrayBuffer) {
const hash = new Uint8Array(await crypto.subtle.digest('SHA-256', new Uint8Array(arrayBuffer)));
let digest = '';
for (const byte of hash) {
digest += byte.toString(16).padStart(2, '0');
}
return digest;
}
}
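_isAudioValid still screens decoded Audio elements by duration, while the new binary path identifies the jpod101 "no audio available" placeholder by a byte-exact SHA-256 digest, which arrayBufferDigest computes as a lowercase hex string. A usage sketch (the fetch call and audioUrl are purely illustrative):

// Illustrative only; audioUrl is a hypothetical placeholder and this
// would run inside an async context.
const arrayBuffer = await (await fetch(audioUrl)).arrayBuffer();
const digest = await AudioSystem.arrayBufferDigest(arrayBuffer);
if (digest === 'ae6398b5a27bc8c0a771df6c907ade794be15518174773c58c7c7ddd17098906') {
    // Known jpod101 placeholder clip; treat the lookup as having no audio.
}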