Project

General

Profile

Download (6.81 KB) Statistics
| Branch: | Tag: | Revision:

haketilo / background / stream_filter.js @ d911bf37

1
/**
2
 * This file is part of Haketilo.
3
 *
4
 * Function: Modifying a web page using the StreamFilter API.
5
 *
6
 * Copyright (C) 2021, Wojtek Kosior
7
 * Copyright (C) 2018, Giorgio Maone <giorgio@maone.net>
8
 *
9
 * This program is free software: you can redistribute it and/or modify
10
 * it under the terms of the GNU General Public License as published by
11
 * the Free Software Foundation, either version 3 of the License, or
12
 * (at your option) any later version.
13
 *
14
 * This program is distributed in the hope that it will be useful,
15
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17
 * GNU General Public License for more details.
18
 *
19
 * You should have received a copy of the GNU General Public License
20
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
21
 *
22
 *
23
 * I, Wojtek Kosior, thereby promise not to sue for violation of this file's
24
 * license. Although I request that you do not make use of this code in a
25
 * proprietary program, I am not going to enforce this in court.
26
 *
27
 *
28
 * Derived from `bg/ResponseProcessor.js' and `bg/ResponseMetaData.js'
29
 * in LibreJS.
30
 */
31

    
32
#FROM common/browser.js IMPORT browser
33
#FROM common/misc.js    IMPORT csp_header_regex
34

    
35
/**
 * Check whether `charset' names an encoding the platform can decode.
 * Returns the label itself when valid, `undefined' otherwise.
 */
function validate_encoding(charset)
{
    /* TextDecoder's constructor throws on labels it does not know. */
    try {
	void new TextDecoder(charset);
    } catch(e) {
	return undefined;
    }

    return charset;
}
44

    
45
/**
 * Predicate: is this webRequest header object the `Content-Type' header?
 * Comparison ignores case and surrounding whitespace in the header name.
 */
function is_content_type_header(header)
{
    const normalized_name = header.name.trim().toLowerCase();
    return normalized_name === "content-type";
}
49

    
50
/* Extracts a charset label from a `Content-Type'-style value,
 * e.g. `text/html; charset=utf-8' -> capture group 1 is `utf-8'. */
const charset_reg = /;\s*charset\s*=\s*([\w-]+)/i;
51

    
52
/**
 * Inspect the response's `Content-Type' headers and collect what they
 * tell us: a valid declared charset (`detected_charset', first one wins)
 * and whether the response looks like HTML (`html').
 */
function properties_from_headers(headers)
{
    const properties = {};

    for (const header of headers) {
	if (!is_content_type_header(header))
	    continue;

	const charset_match = charset_reg.exec(header.value);
	if (charset_match && !properties.detected_charset &&
	    validate_encoding(charset_match[1]))
	    properties.detected_charset = charset_match[1];

	if (/html/i.test(header.value))
	    properties.html = true;
    }

    return properties;
}
68

    
69
/* Known byte-order marks mapped to the encodings they signal. */
const UTF8_BOM = [0xef, 0xbb, 0xbf];
const BOMs = [
    [UTF8_BOM, "utf-8"],
    [[0xfe, 0xff], "utf-16be"],
    [[0xff, 0xfe], "utf-16le"]
];

/**
 * Sniff the charset from a byte-order mark at the start of `data'
 * (a byte array).  Returns the matched charset label or "" when the
 * data starts with no known BOM.
 */
function charset_from_BOM(data)
{
    const matches_start = ([bytes, charset]) =>
	  bytes.every((byte, i) => data[i] === byte);

    const found = BOMs.find(matches_start);
    return found === undefined ? "" : found[1];
}
85

    
86
const charset_attrs =
87
      ['charset', 'http-equiv="content-type"', 'content*="charset"'];
88
const charset_meta_selector =
89
      charset_attrs.map(a => `head>meta[${a}]`).join(", ");
90

    
91
/**
 * Look through the charset-capable `<meta>' tags of a parsed document
 * and return the first valid charset label declared there, either via
 * a `charset' attribute or inside a `content' attribute.  Returns
 * `undefined' when none is found.
 */
function charset_from_meta_tags(doc)
{
    for (const meta of doc.querySelectorAll(charset_meta_selector)) {
	const declared = meta.getAttribute("charset");
	if (declared && validate_encoding(declared))
	    return declared;

	const in_content = charset_reg.exec(meta.getAttribute("content"));
	if (in_content && validate_encoding(in_content[1]))
	    return in_content[1];
    }

    return undefined;
}
105

    
106
/**
 * Construct the TextDecoder to use for this response.  Detection order:
 * byte-order mark, then charset declared in HTTP headers, then (after a
 * sanity check for cached-encoding mismatches) `<meta>' tags of an HTML
 * document, falling back to latin1.  May set `properties.html' as a
 * side effect of content sniffing.
 */
function create_decoder(properties, data)
{
    const known_charset =
	  charset_from_BOM(data) || properties.detected_charset;
    if (known_charset)
	return new TextDecoder(known_charset);

    if (data.indexOf(0) !== -1) {
        console.warn("Haketilo: zeroes in bytestream, probable cached encoding mismatch. Trying to decode it as UTF-16.",
		     properties);
	return new TextDecoder("utf-16be");
    }

    /* Missing HTTP charset, sniffing in content... */
    /*
     * TODO: I recall there is some standard saying how early in the doc the
     * charset has to be specified. We could process just this part of data.
     */
    const as_latin1 = new TextDecoder("latin1").decode(data, {stream: true});
    if (!properties.html)
	properties.html = /html/i.test(as_latin1);

    let sniffed_charset = undefined;
    if (properties.html) {
	const tmp_doc = new DOMParser().parseFromString(as_latin1, "text/html");
	sniffed_charset = charset_from_meta_tags(tmp_doc);
    }

    return new TextDecoder(sniffed_charset || "latin1");
}
134

    
135
/**
 * Heuristically decide, from an initial chunk of HTML, whether the final
 * document could carry CSP rules in `<meta http-equiv>' tags.  Errs on
 * the side of `true' whenever the chunk ends before `<head>' is provably
 * over, since a later chunk could still add such tags.
 */
function may_define_csp_rules(html)
{
    const doc = new DOMParser().parseFromString(html, "text/html");

    for (const meta of doc.querySelectorAll("head>meta[http-equiv]")) {
	if (csp_header_regex.test(meta.httpEquiv) && meta.content)
	    return true;
    }

    /*
     * Even if no naughty `<meta>' tags were found, a subsequent chunk of
     * HTML data could add some.  Before we return `false' we need to be
     * sure we reached the start of `<body>', where `<meta>' tags are no
     * longer valid.
     */
    const body_started = doc.documentElement.nextSibling ||
	  doc.body.nextSibling || doc.body.childNodes.length > 1;
    if (body_started)
	return false;

    const sole_child = doc.body.firstChild;
    if (sole_child === null)
	return true;
    if (sole_child.nodeName !== "#text")
	return false;

    /* A lone character may be the start of a tag or entity yet to come. */
    return /^(<\/|&#|.)$/.test(sole_child.wholeText);
}
162

    
163
/**
 * StreamFilter `ondata' handler.  Decodes the incoming chunk, on the
 * first chunk chooses a decoder, pins the output encoding with a UTF-8
 * BOM and possibly injects a dummy `<script>' (see the comment inside),
 * then writes the chunk back out re-encoded as UTF-8.  Decoder/encoder
 * state is carried between chunks on `properties'.
 */
function filter_data(properties, event)
{
    const data = new Uint8Array(event.data);
    let first_chunk = false;
    if (!properties.decoder) {
	first_chunk = true;
	properties.decoder = create_decoder(properties, data);
	properties.encoder = new TextEncoder();
    }

    /*
     * Decode in streaming mode: without `stream: true' a multi-byte
     * character split across chunk boundaries would be mangled into
     * U+FFFD replacement characters, as each chunk would be treated as a
     * complete bytestream of its own.
     */
    let decoded = properties.decoder.decode(data, {stream: true});

    /* Force UTF-8, this is the only encoding we can produce. */
    if (first_chunk)
	properties.filter.write(new Uint8Array(UTF8_BOM));

    if (first_chunk && may_define_csp_rules(decoded)) {
	/*
	 * HAX! Our content scripts that execute at `document_start' will always
	 * run before the first script in the document, but under Mozilla some
	 * `<meta>' tags might already be loaded at that point. Here we inject a
	 * dummy `<script>' at the beginning (before any `<meta>' tags) that
	 * will force `document_start' to happen earlier. This way our content
	 * scripts will be able to sanitize `http-equiv' tags with CSP rules
	 * that would otherwise stop our injected scripts from executing.
	 *
	 * As we want to only process HTML files that happen to have naughty
	 * `<meta>' tags in `<head>', we use a DOMParser-based heuristic in
	 * `may_define_rules()'. We don't do any additional MIME sniffing as it
	 * is too unreliable (and our heuristic will likely mark non-HTML files
	 * as harmless anyway).
	 */

	const dummy_script = `<script>null</script>`;
	const doctype_decl = /^(\s*<!doctype[^<>"']*>)?/i.exec(decoded)[0];
	decoded = doctype_decl + dummy_script +
	    decoded.substring(doctype_decl.length);
    }

    properties.filter.write(properties.encoder.encode(decoded));

    /*
     * When the source is already UTF-8 we can let the remaining data pass
     * through unmodified instead of re-encoding every chunk.
     */
    if (properties.decoder.encoding === "utf-8")
	properties.filter.disconnect();
}
207

    
208
/**
 * Response-headers hook.  When `policy.payload' demands modifying the
 * page, attach a StreamFilter that re-encodes the response as UTF-8 and
 * possibly injects a dummy `<script>' (see filter_data()).  Always
 * returns `headers' unchanged.
 */
function apply(details, headers, policy)
{
    if (!policy.payload)
	return headers;

    const properties = properties_from_headers(headers);

    properties.filter =
	browser.webRequest.filterResponseData(details.requestId);

    properties.filter.ondata = event => filter_data(properties, event);
    properties.filter.onstop = () => {
	/*
	 * Flush whatever bytes the decoder may still hold buffered (e.g.
	 * a multi-byte character truncated at the very end of the
	 * stream).  Skip the UTF-8 case: there the filter gets
	 * disconnected after the first chunk (see filter_data()) and
	 * must not be written to anymore.
	 */
	if (properties.decoder && properties.decoder.encoding !== "utf-8") {
	    const tail = properties.decoder.decode();
	    if (tail !== "")
		properties.filter.write(properties.encoder.encode(tail));
	}
	properties.filter.close();
    };

    /*
     * In the future we might consider modifying the headers that specify
     * encoding. For now we are not yet doing it, though. However, we
     * prepend the data with UTF-8 BOM which should be enough.
     */
    return headers;
}
228
#EXPORT apply
(6-6/7)