Project

General

Profile

Download (6.74 KB) Statistics
| Branch: | Tag: | Revision:

haketilo / background / stream_filter.js @ 4970930c

1
/**
2
 * This file is part of Haketilo.
3
 *
4
 * Function: Modifying a web page using the StreamFilter API.
5
 *
6
 * Copyright (C) 2021, Wojtek Kosior
7
 * Copyright (C) 2018, Giorgio Maone <giorgio@maone.net>
8
 *
9
 * This program is free software: you can redistribute it and/or modify
10
 * it under the terms of the GNU General Public License as published by
11
 * the Free Software Foundation, either version 3 of the License, or
12
 * (at your option) any later version.
13
 *
14
 * This program is distributed in the hope that it will be useful,
15
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17
 * GNU General Public License for more details.
18
 *
19
 * You should have received a copy of the GNU General Public License
20
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
21
 *
22
 *
23
 * I, Wojtek Kosior, thereby promise not to sue for violation of this file's
24
 * license. Although I request that you do not make use of this code in a
25
 * proprietary program, I am not going to enforce this in court.
26
 *
27
 *
28
 * Derived from `bg/ResponseProcessor.js' and `bg/ResponseMetaData.js'
29
 * in LibreJS.
30
 */
31

    
32
#FROM common/browser.js IMPORT browser
33
#FROM common/misc.js    IMPORT csp_header_regex
34

    
35
/*
 * Return `charset' unchanged if it is a label of an encoding supported by
 * TextDecoder, otherwise return `undefined'.
 */
function validate_encoding(charset)
{
    try {
	/*
	 * TextDecoder's constructor throws a RangeError on unknown labels.
	 * The label must actually be passed - previously the argument was
	 * omitted, so every charset "validated".
	 */
	new TextDecoder(charset);
	return charset;
    } catch(e) {
	return undefined;
    }
}
44

    
45
/*
 * Predicate: does this header object (with `name' and `value' properties)
 * carry the Content-Type of the response?
 */
function is_content_type_header(header)
{
    /* The `return' was previously missing, making the predicate useless. */
    return header.name.toLowerCase().trim() === "content-type";
}
49

    
50
/* Matches the `; charset=<label>' parameter of a Content-Type value;
 * capture group 1 is the encoding label. */
const charset_reg = /;\s*charset\s*=\s*([\w-]+)/i;
51

    
52
/*
 * Derive response properties from its HTTP headers. Returns an object with
 * optional `detected_charset' (a validated encoding label taken from the
 * Content-Type header) and `html' (true when Content-Type mentions HTML).
 */
function properties_from_headers(headers)
{
    const properties = {};

    for (const header of headers.filter(is_content_type_header)) {
	const match = charset_reg.exec(header.value);
	/*
	 * `exec()' yields null when no `charset=' parameter is present;
	 * previously `match[1]' was accessed unconditionally and threw.
	 */
	if (match && !properties.detected_charset &&
	    validate_encoding(match[1]))
	    properties.detected_charset = match[1];

	if (/html/i.test(header.value))
	    properties.html = true;
    }

    return properties;
}
67

    
68
/* Byte-order marks we recognize, each paired with the encoding it implies. */
const UTF8_BOM = [0xef, 0xbb, 0xbf];
const BOMs = [
    [UTF8_BOM, "utf-8"],
    [[0xfe, 0xff], "utf-16be"],
    [[0xff, 0xfe], "utf-16le"]
];

/*
 * Inspect the first bytes of `data' and return the encoding name implied
 * by a recognized byte-order mark, or "" when none matches.
 */
function charset_from_BOM(data)
{
    const starts_with = ([BOM,]) => BOM.every((byte, i) => byte === data[i]);
    const entry = BOMs.find(starts_with);
    return entry ? entry[1] : "";
}
84

    
85
const charset_attrs =
86
      ['charset', 'http-equiv="content-type"', 'content*="charset"'];
87
const charset_meta_selector =
88
      charset_attrs.map(a => `head>meta[${a}]`).join(", ");
89

    
90
/*
 * Search `doc's charset-related `<meta>' tags for a valid encoding name.
 * Returns the first one found, or `undefined' when there is none.
 */
function charset_from_meta_tags(doc)
{
    /* Extract a validated charset from a single `<meta>' tag, if any. */
    const charset_of = meta => {
	const direct = meta.getAttribute("charset");
	if (direct && validate_encoding(direct))
	    return direct;

	const match = charset_reg.exec(meta.getAttribute("content"));
	return (match && validate_encoding(match[1])) ? match[1] : undefined;
    };

    for (const meta of doc.querySelectorAll(charset_meta_selector)) {
	const charset = charset_of(meta);
	if (charset)
	    return charset;
    }

    return undefined;
}
104

    
105
/*
 * Construct the TextDecoder to use for this response. The encoding is
 * taken, in order of preference, from the data's byte-order mark, the
 * Content-Type header and - for HTML content - the document's `<meta>'
 * tags, falling back to latin1 (which decodes any byte sequence).
 * As a side effect this may set `properties.html' based on content
 * sniffing.
 */
function create_decoder(properties, data)
{
    let charset = charset_from_BOM(data) || properties.detected_charset;
    if (!charset && data.indexOf(0) !== -1) {
        console.warn("Haketilo: zeroes in bytestream, probable cached encoding mismatch. Trying to decode it as UTF-16.",
		     properties);
	return new TextDecoder("utf-16be");
    }

    /* Missing HTTP charset, sniffing in content... */
    /*
     * TODO: I recall there is some standard saying how early in the doc the
     * charset has to be specified. We could process just this part of data.
     */
    const text = new TextDecoder("latin1").decode(data, {stream: true});
    properties.html = properties.html || /html/i.test(text);

    if (properties.html) {
	const tmp_doc = new DOMParser().parseFromString(text, "text/html");
	/*
	 * Keep the already-detected charset when the document's `<meta>'
	 * tags specify none; previously the variable was unconditionally
	 * overwritten (possibly with `undefined'), discarding a valid
	 * BOM- or header-derived encoding and mis-decoding as latin1.
	 */
	charset = charset_from_meta_tags(tmp_doc) || charset;
    }

    return new TextDecoder(charset || "latin1");
}
129

    
130
/*
 * Heuristically decide whether this (possibly partial) chunk of HTML could
 * contain - now or after more data arrives - a `<meta http-equiv>' tag
 * carrying CSP rules. Returns true when injection countermeasures are
 * needed, false when we are certain no such tag can appear in `<head>'.
 */
function may_define_csp_rules(html)
{
    const doc = new DOMParser().parseFromString(html, "text/html");

    for (const meta of doc.querySelectorAll("head>meta[http-equiv]")) {
	if (csp_header_regex.test(meta.httpEquiv) && meta.content)
	    return true;
    }

    /*
     * Even if no naughty `<meta>' tags were found, subsequent chunk of HTML
     * data could add some. Before we return `false' we need to be sure we
     * reached the start of `<body>' where `<meta>' tags are no longer valid.
     */

    /*
     * Content after `</html>' or `</body>', or more than one node inside
     * `<body>', means the parser got past `<head>' - no more `<meta>'
     * tags can appear.
     */
    if (doc.documentElement.nextSibling || doc.body.nextSibling ||
	doc.body.childNodes.length > 1)
	return false;

    /* An empty `<body>' - the chunk may have ended inside `<head>'. */
    if (!doc.body.firstChild)
	return true;

    if (doc.body.firstChild.nodeName !== "#text")
	return false;

    /*
     * A single character (or the start of an entity/closing tag) in
     * `<body>' may actually be a truncated tag that belongs to `<head>'
     * once more data arrives, so stay cautious only in that case.
     */
    return /^(<\/|&#|.)$/.test(doc.body.firstChild.wholeText);
}
157

    
158
/*
 * `ondata' handler for the StreamFilter: decode the chunk, inject (on the
 * first chunk, when needed) a dummy `<script>' to force early
 * `document_start', then re-emit everything as UTF-8.
 */
function filter_data(properties, event)
{
    const data = new Uint8Array(event.data);
    let first_chunk = false;
    if (!properties.decoder) {
	first_chunk = true;
	properties.decoder = create_decoder(properties, data);
	properties.encoder = new TextEncoder();
    }

    /*
     * NOTE(review): chunks are decoded without `{stream: true}', so a
     * multi-byte sequence split across chunk boundaries would be mangled
     * for non-UTF-8 responses that stay connected - confirm whether this
     * can happen in practice.
     */
    let decoded = properties.decoder.decode(data);

    /* Force UTF-8, this is the only encoding we can produce. */
    if (first_chunk)
	properties.filter.write(new Uint8Array(UTF8_BOM));

    if (first_chunk && may_define_csp_rules(decoded)) {
	/*
	 * HAX! Our content scripts that execute at `document_start' will always
	 * run before the first script in the document, but under Mozilla some
	 * `<meta>' tags might already be loaded at that point. Here we inject a
	 * dummy `<script>' at the beginning (before any `<meta>' tags) that
	 * will force `document_start' to happen earlier. This way our content
	 * scripts will be able to sanitize `http-equiv' tags with CSP rules
	 * that would otherwise stop our injected scripts from executing.
	 *
	 * As we want to only process HTML files that happen to have naughty
	 * `<meta>' tags in `<head>', we use a DOMParser-based heuristic in
	 * `may_define_rules()'. We don't do any additional MIME sniffing as it
	 * is too unreliable (and our heuristic will likely mark non-HTML files
	 * as harmless anyway).
	 */

	/* Keep any doctype declaration first - it must precede all markup. */
	const dummy_script = `<script>null</script>`;
	const doctype_decl = /^(\s*<!doctype[^<>"']*>)?/i.exec(decoded)[0];
	decoded = doctype_decl + dummy_script +
	    decoded.substring(doctype_decl.length);
    }

    properties.filter.write(properties.encoder.encode(decoded));

    /*
     * Once we know the stream is UTF-8, remaining chunks need no
     * transcoding - hand the rest of the response back to the browser.
     */
    if (properties.decoder.encoding === "utf-8")
	properties.filter.disconnect();
}
202

    
203
/*
 * Install a StreamFilter on the request described by `details' when
 * `policy' calls for payload injection. Returns `headers' unmodified.
 */
function apply(details, headers, policy)
{
    if (!policy.payload)
	return headers;

    const properties = properties_from_headers(headers);
    const filter = browser.webRequest.filterResponseData(details.requestId);

    properties.filter = filter;
    filter.ondata = event => filter_data(properties, event);
    filter.onstop = () => filter.close();

    /*
     * In the future we might consider modifying the headers that specify
     * encoding. For now we are not yet doing it, though. However, we
     * prepend the data with UTF-8 BOM which should be enough.
     */
    return headers;
}
223
#EXPORT apply
(6-6/7)