Project

General

Profile

Download (13.1 KB) Statistics
| Branch: | Tag: | Revision:

hydrilla-builder / src / hydrilla / builder / build.py @ 16eaeb86

1
# SPDX-License-Identifier: AGPL-3.0-or-later
2

    
3
# Building Hydrilla packages.
4
#
5
# This file is part of Hydrilla
6
#
7
# Copyright (C) 2022 Wojtek Kosior
8
#
9
# This program is free software: you can redistribute it and/or modify
10
# it under the terms of the GNU Affero General Public License as
11
# published by the Free Software Foundation, either version 3 of the
12
# License, or (at your option) any later version.
13
#
14
# This program is distributed in the hope that it will be useful,
15
# but WITHOUT ANY WARRANTY; without even the implied warranty of
16
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17
# GNU Affero General Public License for more details.
18
#
19
# You should have received a copy of the GNU Affero General Public License
20
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
21
#
22
#
23
# I, Wojtek Kosior, thereby promise not to sue for violation of this
24
# file's license. Although I request that you do not make use of this code
25
# in a proprietary program, I am not going to enforce this in court.
26

    
27
import json
28
import re
29
import zipfile
30
from pathlib import Path
31
from hashlib import sha256
32
from sys import stderr
33

    
34
import jsonschema
35

    
36
from .. import util
37

    
38
here = Path(__file__).resolve().parent
39
with open(here / 'schemas' / 'package_source-1.schema.json') as schema_file:
40
    index_json_schema = json.load(schema_file)
41

    
42
class FileReferenceError(Exception):
    """
    Raised to report problems with files referenced from a source package's
    index.json (e.g. paths escaping the source tree or reserved filenames).
    """
47

    
48
class ReuseError(Exception):
    """
    Raised to report problems encountered while invoking the REUSE tool
    (missing installation, lint failure or report generation failure).
    """
52

    
53
class FileBuffer:
    """
    A minimal write-only, in-memory file-like object.

    Everything written to it is accumulated and can later be retrieved as a
    single 'bytes' object via get_bytes().
    """
    def __init__(self):
        """Initialize FileBuffer with no buffered data."""
        self.chunks = []

    def write(self, b):
        """
        Buffer 'b' and return the number of bytes buffered.

        'b' is expected to be an instance of 'bytes' or 'str'; a 'str' gets
        encoded as UTF-8 before buffering.
        """
        data = b.encode() if isinstance(b, str) else b
        self.chunks.append(data)
        return len(data)

    def flush(self):
        """Do nothing; present only to satisfy the file-like interface."""

    def get_bytes(self):
        """
        Return all data written so far, concatenated into a single 'bytes'
        object.
        """
        return b''.join(self.chunks)
87

    
88
def generate_spdx_report(root):
    """
    Use REUSE tool to generate an SPDX report for sources under 'root' and
    return the report's contents as 'bytes'.

    'root' shall be an instance of pathlib.Path.

    In case the directory tree under 'root' does not constitute a
    REUSE-compliant package, linting report is printed to standard error and
    a ReuseError is raised.

    In case the reuse package is not installed, a ReuseError is also raised.
    """
    try:
        from reuse._main import main as reuse_main
    except ModuleNotFoundError as err:
        # Bug fix: the ReuseError was previously constructed but never
        # raised, so execution fell through to a NameError on 'reuse_main'.
        raise ReuseError("Could not import 'reuse'. Is the tool installed and visible to this Python instance?") from err

    # 'reuse lint' returns 0 only for a fully REUSE-compliant tree.
    mocked_output = FileBuffer()
    if reuse_main(args=['--root', str(root), 'lint'], out=mocked_output) != 0:
        stderr.write(mocked_output.get_bytes().decode())
        raise ReuseError('Attempt to generate an SPDX report for a REUSE-incompliant package.')

    # 'reuse spdx' writes the SPDX document to the supplied output object.
    mocked_output = FileBuffer()
    if reuse_main(args=['--root', str(root), 'spdx'], out=mocked_output) != 0:
        stderr.write(mocked_output.get_bytes().decode())
        raise ReuseError("Couldn't generate an SPDX report for package.")

    return mocked_output.get_bytes()
117

    
118
class FileRef:
    """Represent reference to a file in the package."""

    def __init__(self, path: Path, contents: bytes):
        """Initialize FileRef, computing the SHA-256 hash of 'contents'."""
        # Flags controlling where this file ends up; Build mutates
        # include_in_distribution after construction.
        self.include_in_distribution = False
        self.include_in_zipfile      = True
        self.path                    = path
        self.contents                = contents
        self.contents_hash           = sha256(contents).digest().hex()

    def make_ref_dict(self, filename: str):
        """
        Represent the file reference through a dict that can be included in
        JSON definitions.
        """
        return {'file': filename, 'sha256': self.contents_hash}
138

    
139
class Build:
    """
    Build a Hydrilla package.
    """
    def __init__(self, srcdir: Path, index_json_path: Path):
        """
        Initialize a build. All files to be included in a distribution package
        are loaded into memory, all data gets validated and all necessary
        computations (e.g. preparing of hashes) are performed.

        'srcdir' and 'index_json_path' are expected to be pathlib.Path
        objects.
        """
        self.srcdir          = srcdir.resolve()
        self.index_json_path = index_json_path
        self.files_by_path   = {}
        self.resource_list   = []
        self.mapping_list    = []

        # A relative index.json path is interpreted relative to srcdir.
        if not index_json_path.is_absolute():
            self.index_json_path = (self.srcdir / self.index_json_path)

        self.index_json_path = self.index_json_path.resolve()

        with open(self.index_json_path, 'rt') as index_file:
            index_json_text = index_file.read()

        index_obj = json.loads(util.strip_json_comments(index_json_text))

        # index.json itself is always included in the source zipfile, under
        # its canonical name regardless of where it was actually loaded from.
        self.files_by_path[self.srcdir / 'index.json'] = \
            FileRef(self.srcdir / 'index.json', index_json_text.encode())

        self._process_index_json(index_obj)

    def _process_file(self, filename: str, include_in_distribution: bool=True):
        """
        Resolve 'filename' relative to srcdir, load it to memory (if not loaded
        before), compute its hash and store its information in
        'self.files_by_path'.

        'filename' shall represent a relative path using '/' as a separator.

        if 'include_in_distribution' is True it shall cause the file to not only
        be included in the source package's zipfile, but also written as one of
        built package's files.

        Return file's reference object that can be included in JSON definitions
        of various kinds.
        """
        path = self.srcdir
        for segment in filename.split('/'):
            path /= segment

        # Guard against path traversal ('..' segments, absolute-ish tricks)
        # escaping the package source directory.
        path = path.resolve()
        if not path.is_relative_to(self.srcdir):
            # Bug fix: the f-string previously contained no placeholder, so
            # the offending filename never appeared in the error message.
            raise FileReferenceError(f"Attempt to load '{filename}' which lies outside package source directory.")

        if str(path.relative_to(self.srcdir)) == 'index.json':
            raise FileReferenceError("Attempt to load 'index.json' which is a reserved filename.")

        # Load each file at most once; later references reuse the FileRef.
        file_ref = self.files_by_path.get(path)
        if file_ref is None:
            with open(path, 'rb') as file_handle:
                contents = file_handle.read()

            file_ref = FileRef(path, contents)
            self.files_by_path[path] = file_ref

        # Never clear the flag here - once a file is marked for distribution
        # by any reference, it stays marked.
        if include_in_distribution:
            file_ref.include_in_distribution = True

        return file_ref.make_ref_dict(filename)

    def _prepare_source_package_zip(self, root_dir_name: str):
        """
        Create and store in memory a .zip archive containing files needed to
        build this source package.

        'root_dir_name' shall not contain any slashes ('/').

        Return zipfile's sha256 sum's hexstring.
        """
        fb = FileBuffer()
        root_dir_path = Path(root_dir_name)

        def zippath(file_path):
            # Archive entries live under a single top-level directory named
            # after the source package.
            file_path = root_dir_path / file_path.relative_to(self.srcdir)
            return file_path.as_posix()

        with zipfile.ZipFile(fb, 'w') as xpi:
            for file_ref in self.files_by_path.values():
                if file_ref.include_in_zipfile:
                    xpi.writestr(zippath(file_ref.path), file_ref.contents)

        self.source_zip_contents = fb.get_bytes()

        return sha256(self.source_zip_contents).digest().hex()

    def _process_item(self, item_def: dict):
        """
        Process 'item_def' as definition of a resource/mapping and store in
        memory its processed form and files used by it.

        Return a minimal item reference suitable for using in source
        description.
        """
        copy_props = ['type', 'identifier', 'long_name', 'uuid', 'description']
        if 'comment' in item_def:
            copy_props.append('comment')

        if item_def['type'] == 'resource':
            item_list = self.resource_list

            copy_props.append('revision')

            # Referenced script files get loaded and hashed as a side effect.
            script_file_refs = [self._process_file(f['file'])
                                for f in item_def.get('scripts', [])]

            new_item_obj = {
                'dependencies': item_def.get('dependencies', []),
                'scripts':      script_file_refs
            }
        else:
            item_list = self.mapping_list

            # Only the resource identifier is kept for each payload pattern.
            payloads = {}
            for pat, res_ref in item_def.get('payloads', {}).items():
                payloads[pat] = {'identifier': res_ref['identifier']}

            new_item_obj = {
                'payloads': payloads
            }

        new_item_obj.update([(p, item_def[p]) for p in copy_props])

        new_item_obj['version'] = util.normalize_version(item_def['version'])
        new_item_obj['api_schema_version'] = [1, 0, 1]
        new_item_obj['source_copyright'] = self.copyright_file_refs
        new_item_obj['source_name'] = self.source_name

        item_list.append(new_item_obj)

        return dict([(prop, new_item_obj[prop])
                     for prop in ('type', 'identifier', 'version')])

    def _process_index_json(self, index_obj: dict):
        """
        Process 'index_obj' as contents of source package's index.json and store
        in memory this source package's zipfile as well as package's individual
        files and computed definitions of the source package and items defined
        in it.
        """
        jsonschema.validate(index_obj, index_json_schema)

        self.source_name = index_obj['source_name']

        generate_spdx = index_obj.get('reuse_generate_spdx_report', False)
        if generate_spdx:
            contents  = generate_spdx_report(self.srcdir)
            spdx_path = (self.srcdir / 'report.spdx').resolve()
            spdx_ref  = FileRef(spdx_path, contents)

            # The report is generated, not a source file, so it must not end
            # up in the source zipfile.
            spdx_ref.include_in_zipfile = False
            self.files_by_path[spdx_path] = spdx_ref

        self.copyright_file_refs = \
            [self._process_file(f['file']) for f in index_obj['copyright']]

        # If listed among copyright files, _process_file() above has flagged
        # the pre-registered spdx_ref for distribution; otherwise refuse.
        if generate_spdx and not spdx_ref.include_in_distribution:
            raise FileReferenceError("Told to generate 'report.spdx' but 'report.spdx' is not listed among copyright files. Refusing to proceed.")

        item_refs = [self._process_item(d) for d in index_obj['definitions']]

        for file_ref in index_obj.get('additional_files', []):
            self._process_file(file_ref['file'], include_in_distribution=False)

        root_dir_path = Path(self.source_name)

        source_archives_obj = {
            'zip' : {
                'sha256': self._prepare_source_package_zip(root_dir_path)
            }
        }

        self.source_description = {
            'api_schema_version': [1, 0, 1],
            'source_name':        self.source_name,
            'source_copyright':   self.copyright_file_refs,
            'upstream_url':       index_obj['upstream_url'],
            'definitions':        item_refs,
            'source_archives':    source_archives_obj
        }

        if 'comment' in index_obj:
            self.source_description['comment'] = index_obj['comment']

    def write_source_package_zip(self, dstpath: Path):
        """
        Create a .zip archive containing files needed to build this source
        package and write it at 'dstpath'.
        """
        with open(dstpath, 'wb') as output:
            output.write(self.source_zip_contents)

    def write_package_files(self, dstpath: Path):
        """Write package files under 'dstpath' for distribution."""
        # Individual files are content-addressed by their SHA-256 hash.
        file_dir_path = (dstpath / 'file').resolve()
        file_dir_path.mkdir(parents=True, exist_ok=True)

        for file_ref in self.files_by_path.values():
            if file_ref.include_in_distribution:
                file_name = f'sha256-{file_ref.contents_hash}'
                with open(file_dir_path / file_name, 'wb') as output:
                    output.write(file_ref.contents)

        source_dir_path = (dstpath / 'source').resolve()
        source_dir_path.mkdir(parents=True, exist_ok=True)
        source_name = self.source_description["source_name"]

        with open(source_dir_path / f'{source_name}.json', 'wt') as output:
            json.dump(self.source_description, output)

        with open(source_dir_path / f'{source_name}.zip', 'wb') as output:
            output.write(self.source_zip_contents)

        # Each item definition is written under
        # <dstpath>/<type>/<identifier>/<dotted version>.
        for item_type, item_list in [
                ('resource', self.resource_list),
                ('mapping', self.mapping_list)
        ]:
            item_type_dir_path = (dstpath / item_type).resolve()

            for item_def in item_list:
                item_dir_path = item_type_dir_path / item_def['identifier']
                item_dir_path.mkdir(parents=True, exist_ok=True)

                version = '.'.join([str(n) for n in item_def['version']])
                with open(item_dir_path / version, 'wt') as output:
                    json.dump(item_def, output)
(3-3/4)