diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..d436adb --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +prune sbapp/patches +prune sbapp/dist +prune recipes +prune docs +prune libs +prune .github \ No newline at end of file diff --git a/README.md b/README.md index 197afdb..c1ff3e6 100644 --- a/README.md +++ b/README.md @@ -146,15 +146,24 @@ sideband ## On macOS -A DMG file containing a macOS app bundle is available on the [latest release](https://github.com/markqvist/Sideband/releases/latest) page. +On macOS, you can install Sideband with `pip3` or `pipx`. Due to the many different potential Python versions and install paths across macOS versions, the easiest install method is to use `pipx`. -Please note that audio messaging functionality isn't supported on macOS yet. Please support the development if you'd like to see this feature added faster. - -Alternatively, you can install Sideband with ``pip3`` on macOS: +If you don't already have the `pipx` package manager installed, it can be installed via [Homebrew](https://brew.sh/) with `brew install pipx`. ```bash -# Install Sideband and dependencies on macOS: -pip3 install sbapp +# Install Sideband and dependencies on macOS using pipx: +pipx install sbapp +pipx ensurepath + +# Run it +sideband +``` + +Or, if you prefer to use `pip` directly, follow the instructions below. In this case, if you have not already installed Python and `pip3` on your macOS system, [download and install](https://www.python.org/downloads/) the latest version first. + +```bash +# Install Sideband and dependencies on macOS using pip: +pip3 install sbapp --user --break-system-packages # Run it: python3 -m sbapp.main @@ -166,8 +175,6 @@ sideband ``` -If you have not already installed Python and `pip3` on your macOS system, [download and install](https://www.python.org/downloads/) the latest version first. - ## On Windows Even though there is currently not an automated installer, or packaged `.exe` file for Sideband on Windows, you can still install it through `pip`. If you don't already have Python installed, [download and install](https://www.python.org/downloads/) the latest version of Python. diff --git a/docs/example_plugins/telemetry.py b/docs/example_plugins/telemetry.py index b436a06..c6993e9 100644 --- a/docs/example_plugins/telemetry.py +++ b/docs/example_plugins/telemetry.py @@ -1,4 +1,4 @@ -# This is a bare-minimum telemetry plugin +# This is a basic telemetry plugin # example that you can build upon to # implement your own telemetry plugins. 
@@ -23,16 +23,44 @@ class BasicTelemetryPlugin(SidebandTelemetryPlugin): def update_telemetry(self, telemeter): if telemeter != None: - RNS.log("Updating power sensors") + # Create power consumption sensors telemeter.synthesize("power_consumption") telemeter.sensors["power_consumption"].update_consumer(2163.15, type_label="Heater consumption") telemeter.sensors["power_consumption"].update_consumer(12.7/1e6, type_label="Receiver consumption") - telemeter.sensors["power_consumption"].update_consumer(0.055, type_label="LED consumption") + telemeter.sensors["power_consumption"].update_consumer(0.055, type_label="LED consumption", custom_icon="led-on") telemeter.sensors["power_consumption"].update_consumer(982.22*1e9, type_label="Smelter consumption") + # Create power production sensor telemeter.synthesize("power_production") - telemeter.sensors["power_production"].update_producer(5732.15, type_label="Solar production") + telemeter.sensors["power_production"].update_producer(5732.15, type_label="Solar production", custom_icon="solar-power-variant") + + # Create storage sensor + telemeter.synthesize("nvm") + telemeter.sensors["nvm"].update_entry(capacity=256e9, used=38.48e9, type_label="SSD") + + # Create RAM sensors + telemeter.synthesize("ram") + telemeter.sensors["ram"].update_entry(capacity=8e9, used=3.48e9, type_label="RAM") + telemeter.sensors["ram"].update_entry(capacity=16e9, used=0.72e9, type_label="Swap") + + # Create CPU sensor + telemeter.synthesize("processor") + telemeter.sensors["processor"].update_entry(current_load=0.42, clock=2.45e9, load_avgs=[0.27, 0.43, 0.49], type_label="CPU") + + # Create custom sensor + telemeter.synthesize("custom") + telemeter.sensors["custom"].update_entry("311 seconds", type_label="Specific impulse is", custom_icon="rocket-launch") + telemeter.sensors["custom"].update_entry("a lie", type_label="The cake is", custom_icon="cake-variant") + + # Create tank sensors + telemeter.synthesize("tank") + telemeter.sensors["tank"].update_entry(capacity=1500, level=728, type_label="Fresh water", custom_icon="cup-water") + telemeter.sensors["tank"].update_entry(capacity=2000, level=122, unit="L", type_label="Waste tank") + + # Create fuel sensor + telemeter.synthesize("fuel") + telemeter.sensors["fuel"].update_entry(capacity=75, level=61) # Finally, tell Sideband what class in this # file is the actual plugin class. 
-plugin_class = BasicTelemetryPlugin
\ No newline at end of file
+plugin_class = BasicTelemetryPlugin
diff --git a/libs/arm64/libcodec2.so b/libs/arm64/libcodec2.so
new file mode 100755
index 0000000..b8481a5
Binary files /dev/null and b/libs/arm64/libcodec2.so differ
diff --git a/libs/armeabi/libcodec2.so b/libs/armeabi/libcodec2.so
new file mode 100755
index 0000000..e76ad7f
Binary files /dev/null and b/libs/armeabi/libcodec2.so differ
diff --git a/recipes/codec2/__init__.py b/recipes/codec2/__init__.py
new file mode 100644
index 0000000..81aa527
--- /dev/null
+++ b/recipes/codec2/__init__.py
@@ -0,0 +1,51 @@
+from os.path import join
+from pythonforandroid.recipe import Recipe
+from pythonforandroid.toolchain import current_directory, shprint
+import os
+import sh
+
+# For debugging, clean with
+# buildozer android p4a -- clean_recipe_build codec2 --local-recipes ~/Information/Source/Sideband/recipes
+
+class Codec2Recipe(Recipe):
+    url = "https://github.com/markqvist/codec2/archive/00e01c9d72d3b1607e165c71c4c9c942d277dfac.tar.gz"
+    built_libraries = {'libcodec2.so': 'build_android/src'}
+
+    def include_flags(self, arch):
+        '''Returns a string with the include folders'''
+        codec2_includes = join(self.get_build_dir(arch.arch), 'build_android')
+        return (' -I' + codec2_includes)
+
+    def link_dirs_flags(self, arch):
+        '''Returns a string with the appropriate `-L` to link
+        with the libs. This string is usually added to the environment
+        variable `LDFLAGS`'''
+        return ' -L' + self.get_build_dir(arch.arch)
+
+    # def link_libs_flags(self):
+    #     '''Returns a string with the appropriate `-l` flags to link with
+    #     the libs. This string is usually added to the environment
+    #     variable `LIBS`'''
+    #     return ' -lcodec2{version} -lssl{version}'.format(version=self.version)
+
+    def build_arch(self, arch):
+        with current_directory(self.get_build_dir(arch.arch)):
+            env = self.get_recipe_env(arch)
+            flags = [
+                "..",
+                "--log-level=TRACE",
+                "--fresh",
+                "-DCMAKE_BUILD_TYPE=Release",
+            ]
+
+            mkdir = sh.mkdir("-p", "build_android")
+            # cd = sh.cd("build_android")
+            os.chdir("build_android")
+            cmake = sh.Command('cmake')
+
+            shprint(cmake, *flags, _env=env)
+            shprint(sh.make, _env=env)
+            sh.cp("../src/codec2.h", "./codec2/")
+
+
+recipe = Codec2Recipe()
diff --git a/recipes/codec2/generate_codebook b/recipes/codec2/generate_codebook
new file mode 100755
index 0000000..b4ed668
Binary files /dev/null and b/recipes/codec2/generate_codebook differ
diff --git a/recipes/ffpyplayer/__init__.py b/recipes/ffpyplayer/__init__.py
new file mode 100644
index 0000000..03960b7
--- /dev/null
+++ b/recipes/ffpyplayer/__init__.py
@@ -0,0 +1,1516 @@
+from os.path import join
+
+from os.path import basename, dirname, exists, isdir, isfile, join, realpath, split
+import glob
+
+import hashlib
+from re import match
+
+import sh
+import shutil
+import fnmatch
+import zipfile
+import urllib.request
+from urllib.request import urlretrieve
+from os import listdir, unlink, environ, curdir, walk
+from sys import stdout
+from wheel.wheelfile import WheelFile
+from wheel.cli.tags import tags as wheel_tags
+import time
+try:
+    from urlparse import urlparse
+except ImportError:
+    from urllib.parse import urlparse
+
+import packaging.version
+
+from pythonforandroid.logger import (
+    logger, info, warning, debug, shprint, info_main, error)
+from pythonforandroid.util import (
+    current_directory, ensure_dir, BuildInterruptingException, rmdir, move,
+    touch)
+from pythonforandroid.util import load_source as import_recipe
+
+
+url_opener =
urllib.request.build_opener() +url_orig_headers = url_opener.addheaders +urllib.request.install_opener(url_opener) + + +class RecipeMeta(type): + def __new__(cls, name, bases, dct): + if name != 'Recipe': + if 'url' in dct: + dct['_url'] = dct.pop('url') + if 'version' in dct: + dct['_version'] = dct.pop('version') + + return super().__new__(cls, name, bases, dct) + + +class Recipe(metaclass=RecipeMeta): + _url = None + '''The address from which the recipe may be downloaded. This is not + essential, it may be omitted if the source is available some other + way, such as via the :class:`IncludedFilesBehaviour` mixin. + + If the url includes the version, you may (and probably should) + replace this with ``{version}``, which will automatically be + replaced by the :attr:`version` string during download. + + .. note:: Methods marked (internal) are used internally and you + probably don't need to call them, but they are available + if you want. + ''' + + _version = None + '''A string giving the version of the software the recipe describes, + e.g. ``2.0.3`` or ``master``.''' + + md5sum = None + '''The md5sum of the source from the :attr:`url`. Non-essential, but + you should try to include this, it is used to check that the download + finished correctly. + ''' + + sha512sum = None + '''The sha512sum of the source from the :attr:`url`. Non-essential, but + you should try to include this, it is used to check that the download + finished correctly. + ''' + + blake2bsum = None + '''The blake2bsum of the source from the :attr:`url`. Non-essential, but + you should try to include this, it is used to check that the download + finished correctly. + ''' + + depends = [] + '''A list containing the names of any recipes that this recipe depends on. + ''' + + conflicts = [] + '''A list containing the names of any recipes that are known to be + incompatible with this one.''' + + opt_depends = [] + '''A list of optional dependencies, that must be built before this + recipe if they are built at all, but whose presence is not essential.''' + + patches = [] + '''A list of patches to apply to the source. Values can be either a string + referring to the patch file relative to the recipe dir, or a tuple of the + string patch file and a callable, which will receive the kwargs `arch` and + `recipe`, which should return True if the patch should be applied.''' + + python_depends = [] + '''A list of pure-Python packages that this package requires. These + packages will NOT be available at build time, but will be added to the + list of pure-Python packages to install via pip. If you need these packages + at build time, you must create a recipe.''' + + archs = ['armeabi'] # Not currently implemented properly + + built_libraries = {} + """Each recipe that builds a system library (e.g.:libffi, openssl, etc...) + should contain a dict holding the relevant information of the library. The + keys should be the generated libraries and the values the relative path of + the library inside his build folder. This dict will be used to perform + different operations: + - copy the library into the right location, depending on if it's shared + or static) + - check if we have to rebuild the library + + Here an example of how it would look like for `libffi` recipe: + + - `built_libraries = {'libffi.so': '.libs'}` + + .. 
note:: in case that the built library resides in recipe's build + directory, you can set the following values for the relative + path: `'.', None or ''` + """ + + need_stl_shared = False + '''Some libraries or python packages may need the c++_shared in APK. + We can automatically do this for any recipe if we set this property to + `True`''' + + stl_lib_name = 'c++_shared' + ''' + The default STL shared lib to use: `c++_shared`. + + .. note:: Android NDK version > 17 only supports 'c++_shared', because + starting from NDK r18 the `gnustl_shared` lib has been deprecated. + ''' + + def get_stl_library(self, arch): + return join( + arch.ndk_lib_dir, + 'lib{name}.so'.format(name=self.stl_lib_name), + ) + + def install_stl_lib(self, arch): + if not self.ctx.has_lib( + arch.arch, 'lib{name}.so'.format(name=self.stl_lib_name) + ): + self.install_libs(arch, self.get_stl_library(arch)) + + @property + def version(self): + key = 'VERSION_' + self.name + return environ.get(key, self._version) + + @property + def url(self): + key = 'URL_' + self.name + return environ.get(key, self._url) + + @property + def versioned_url(self): + '''A property returning the url of the recipe with ``{version}`` + replaced by the :attr:`url`. If accessing the url, you should use this + property, *not* access the url directly.''' + if self.url is None: + return None + return self.url.format(version=self.version) + + def download_file(self, url, target, cwd=None): + """ + (internal) Download an ``url`` to a ``target``. + """ + if not url: + return + + info('Downloading {} from {}'.format(self.name, url)) + + if cwd: + target = join(cwd, target) + + parsed_url = urlparse(url) + if parsed_url.scheme in ('http', 'https'): + def report_hook(index, blksize, size): + if size <= 0: + progression = '{0} bytes'.format(index * blksize) + else: + progression = '{0:.2f}%'.format( + index * blksize * 100. 
/ float(size)) + if "CI" not in environ: + stdout.write('- Download {}\r'.format(progression)) + stdout.flush() + + if exists(target): + unlink(target) + + # Download item with multiple attempts (for bad connections): + attempts = 0 + seconds = 1 + while True: + try: + # jqueryui.com returns a 403 w/ the default user agent + # Mozilla/5.0 doesnt handle redirection for liblzma + url_opener.addheaders = [('User-agent', 'Wget/1.0')] + urlretrieve(url, target, report_hook) + except OSError as e: + attempts += 1 + if attempts >= 5: + raise + stdout.write('Download failed: {}; retrying in {} second(s)...'.format(e, seconds)) + time.sleep(seconds) + seconds *= 2 + continue + finally: + url_opener.addheaders = url_orig_headers + break + return target + elif parsed_url.scheme in ('git', 'git+file', 'git+ssh', 'git+http', 'git+https'): + if not isdir(target): + if url.startswith('git+'): + url = url[4:] + # if 'version' is specified, do a shallow clone + if self.version: + ensure_dir(target) + with current_directory(target): + shprint(sh.git, 'init') + shprint(sh.git, 'remote', 'add', 'origin', url) + else: + shprint(sh.git, 'clone', '--recursive', url, target) + with current_directory(target): + if self.version: + shprint(sh.git, 'fetch', '--tags', '--depth', '1') + shprint(sh.git, 'checkout', self.version) + branch = sh.git('branch', '--show-current') + if branch: + shprint(sh.git, 'pull') + shprint(sh.git, 'pull', '--recurse-submodules') + shprint(sh.git, 'submodule', 'update', '--recursive', '--init', '--depth', '1') + return target + + def apply_patch(self, filename, arch, build_dir=None): + """ + Apply a patch from the current recipe directory into the current + build directory. + + .. versionchanged:: 0.6.0 + Add ability to apply patch from any dir via kwarg `build_dir`''' + """ + info("Applying patch {}".format(filename)) + build_dir = build_dir if build_dir else self.get_build_dir(arch) + filename = join(self.get_recipe_dir(), filename) + shprint(sh.patch, "-t", "-d", build_dir, "-p1", + "-i", filename, _tail=10) + + def copy_file(self, filename, dest): + info("Copy {} to {}".format(filename, dest)) + filename = join(self.get_recipe_dir(), filename) + dest = join(self.build_dir, dest) + shutil.copy(filename, dest) + + def append_file(self, filename, dest): + info("Append {} to {}".format(filename, dest)) + filename = join(self.get_recipe_dir(), filename) + dest = join(self.build_dir, dest) + with open(filename, "rb") as fd: + data = fd.read() + with open(dest, "ab") as fd: + fd.write(data) + + @property + def name(self): + '''The name of the recipe, the same as the folder containing it.''' + modname = self.__class__.__module__ + return modname.split(".", 2)[-1] + + @property + def filtered_archs(self): + '''Return archs of self.ctx that are valid build archs + for the Recipe.''' + result = [] + for arch in self.ctx.archs: + if not self.archs or (arch.arch in self.archs): + result.append(arch) + return result + + def check_recipe_choices(self): + '''Checks what recipes are being built to see which of the alternative + and optional dependencies are being used, + and returns a list of these.''' + recipes = [] + built_recipes = self.ctx.recipe_build_order + for recipe in self.depends: + if isinstance(recipe, (tuple, list)): + for alternative in recipe: + if alternative in built_recipes: + recipes.append(alternative) + break + for recipe in self.opt_depends: + if recipe in built_recipes: + recipes.append(recipe) + return sorted(recipes) + + def get_opt_depends_in_list(self, recipes): + '''Given 
a list of recipe names, returns those that are also in + self.opt_depends. + ''' + return [recipe for recipe in recipes if recipe in self.opt_depends] + + def get_build_container_dir(self, arch): + '''Given the arch name, returns the directory where it will be + built. + + This returns a different directory depending on what + alternative or optional dependencies are being built. + ''' + dir_name = self.get_dir_name() + return join(self.ctx.build_dir, 'other_builds', + dir_name, '{}__ndk_target_{}'.format(arch, self.ctx.ndk_api)) + + def get_dir_name(self): + choices = self.check_recipe_choices() + dir_name = '-'.join([self.name] + choices) + return dir_name + + def get_build_dir(self, arch): + '''Given the arch name, returns the directory where the + downloaded/copied package will be built.''' + + return join(self.get_build_container_dir(arch), self.name) + + def get_recipe_dir(self): + """ + Returns the local recipe directory or defaults to the core recipe + directory. + """ + if self.ctx.local_recipes is not None: + local_recipe_dir = join(self.ctx.local_recipes, self.name) + if exists(local_recipe_dir): + return local_recipe_dir + return join(self.ctx.root_dir, 'recipes', self.name) + + # Public Recipe API to be subclassed if needed + + def download_if_necessary(self): + info_main('Downloading {}'.format(self.name)) + user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower())) + if user_dir is not None: + info('P4A_{}_DIR is set, skipping download for {}'.format( + self.name, self.name)) + return + self.download() + + def download(self): + if self.url is None: + info('Skipping {} download as no URL is set'.format(self.name)) + return + + url = self.versioned_url + expected_digests = {} + for alg in set(hashlib.algorithms_guaranteed) | set(('md5', 'sha512', 'blake2b')): + expected_digest = getattr(self, alg + 'sum') if hasattr(self, alg + 'sum') else None + ma = match(u'^(.+)#' + alg + u'=([0-9a-f]{32,})$', url) + if ma: # fragmented URL? 
+ if expected_digest: + raise ValueError( + ('Received {}sum from both the {} recipe ' + 'and its url').format(alg, self.name)) + url = ma.group(1) + expected_digest = ma.group(2) + if expected_digest: + expected_digests[alg] = expected_digest + + ensure_dir(join(self.ctx.packages_path, self.name)) + + with current_directory(join(self.ctx.packages_path, self.name)): + filename = shprint(sh.basename, url).stdout[:-1].decode('utf-8') + + do_download = True + marker_filename = '.mark-{}'.format(filename) + if exists(filename) and isfile(filename): + if not exists(marker_filename): + shprint(sh.rm, filename) + else: + for alg, expected_digest in expected_digests.items(): + current_digest = algsum(alg, filename) + if current_digest != expected_digest: + debug('* Generated {}sum: {}'.format(alg, + current_digest)) + debug('* Expected {}sum: {}'.format(alg, + expected_digest)) + raise ValueError( + ('Generated {0}sum does not match expected {0}sum ' + 'for {1} recipe').format(alg, self.name)) + do_download = False + + # If we got this far, we will download + if do_download: + debug('Downloading {} from {}'.format(self.name, url)) + + shprint(sh.rm, '-f', marker_filename) + self.download_file(self.versioned_url, filename) + touch(marker_filename) + + if exists(filename) and isfile(filename): + for alg, expected_digest in expected_digests.items(): + current_digest = algsum(alg, filename) + if current_digest != expected_digest: + debug('* Generated {}sum: {}'.format(alg, + current_digest)) + debug('* Expected {}sum: {}'.format(alg, + expected_digest)) + raise ValueError( + ('Generated {0}sum does not match expected {0}sum ' + 'for {1} recipe').format(alg, self.name)) + else: + info('{} download already cached, skipping'.format(self.name)) + + def unpack(self, arch): + info_main('Unpacking {} for {}'.format(self.name, arch)) + + build_dir = self.get_build_container_dir(arch) + + user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower())) + if user_dir is not None: + info('P4A_{}_DIR exists, symlinking instead'.format( + self.name.lower())) + if exists(self.get_build_dir(arch)): + return + rmdir(build_dir) + ensure_dir(build_dir) + shprint(sh.cp, '-a', user_dir, self.get_build_dir(arch)) + return + + if self.url is None: + info('Skipping {} unpack as no URL is set'.format(self.name)) + return + + filename = shprint( + sh.basename, self.versioned_url).stdout[:-1].decode('utf-8') + ma = match(u'^(.+)#[a-z0-9_]{3,}=([0-9a-f]{32,})$', filename) + if ma: # fragmented URL? 
+ filename = ma.group(1) + + with current_directory(build_dir): + directory_name = self.get_build_dir(arch) + + if not exists(directory_name) or not isdir(directory_name): + extraction_filename = join( + self.ctx.packages_path, self.name, filename) + if isfile(extraction_filename): + if extraction_filename.endswith(('.zip', '.whl')): + try: + sh.unzip(extraction_filename) + except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2): + # return code 1 means unzipping had + # warnings but did complete, + # apparently happens sometimes with + # github zips + pass + fileh = zipfile.ZipFile(extraction_filename, 'r') + root_directory = fileh.filelist[0].filename.split('/')[0] + if root_directory != basename(directory_name): + move(root_directory, directory_name) + elif extraction_filename.endswith( + ('.tar.gz', '.tgz', '.tar.bz2', '.tbz2', '.tar.xz', '.txz')): + sh.tar('xf', extraction_filename) + root_directory = sh.tar('tf', extraction_filename).stdout.decode( + 'utf-8').split('\n')[0].split('/')[0] + if root_directory != basename(directory_name): + move(root_directory, directory_name) + else: + raise Exception( + 'Could not extract {} download, it must be .zip, ' + '.tar.gz or .tar.bz2 or .tar.xz'.format(extraction_filename)) + elif isdir(extraction_filename): + ensure_dir(directory_name) + for entry in listdir(extraction_filename): + # Previously we filtered out the .git folder, but during the build process for some recipes + # (e.g. when version is parsed by `setuptools_scm`) that may be needed. + shprint(sh.cp, '-Rv', + join(extraction_filename, entry), + directory_name) + else: + raise Exception( + 'Given path is neither a file nor a directory: {}' + .format(extraction_filename)) + + else: + info('{} is already unpacked, skipping'.format(self.name)) + + def get_recipe_env(self, arch=None, with_flags_in_cc=True): + """Return the env specialized for the recipe + """ + if arch is None: + arch = self.filtered_archs[0] + env = arch.get_env(with_flags_in_cc=with_flags_in_cc) + return env + + def prebuild_arch(self, arch): + '''Run any pre-build tasks for the Recipe. By default, this checks if + any prebuild_archname methods exist for the archname of the current + architecture, and runs them if so.''' + prebuild = "prebuild_{}".format(arch.arch.replace('-', '_')) + if hasattr(self, prebuild): + getattr(self, prebuild)() + else: + info('{} has no {}, skipping'.format(self.name, prebuild)) + + def is_patched(self, arch): + build_dir = self.get_build_dir(arch.arch) + return exists(join(build_dir, '.patched')) + + def apply_patches(self, arch, build_dir=None): + '''Apply any patches for the Recipe. + + .. versionchanged:: 0.6.0 + Add ability to apply patches from any dir via kwarg `build_dir`''' + if self.patches: + info_main('Applying patches for {}[{}]' + .format(self.name, arch.arch)) + + if self.is_patched(arch): + info_main('{} already patched, skipping'.format(self.name)) + return + + build_dir = build_dir if build_dir else self.get_build_dir(arch.arch) + for patch in self.patches: + if isinstance(patch, (tuple, list)): + patch, patch_check = patch + if not patch_check(arch=arch, recipe=self): + continue + + self.apply_patch( + patch.format(version=self.version, arch=arch.arch), + arch.arch, build_dir=build_dir) + + touch(join(build_dir, '.patched')) + + def should_build(self, arch): + '''Should perform any necessary test and return True only if it needs + building again. Per default we implement a library test, in case that + we detect so. 
+ + ''' + if self.built_libraries: + return not all( + exists(lib) for lib in self.get_libraries(arch.arch) + ) + return True + + def build_arch(self, arch): + '''Run any build tasks for the Recipe. By default, this checks if + any build_archname methods exist for the archname of the current + architecture, and runs them if so.''' + build = "build_{}".format(arch.arch) + if hasattr(self, build): + getattr(self, build)() + + def install_libraries(self, arch): + '''This method is always called after `build_arch`. In case that we + detect a library recipe, defined by the class attribute + `built_libraries`, we will copy all defined libraries into the + right location. + ''' + if not self.built_libraries: + return + shared_libs = [ + lib for lib in self.get_libraries(arch) if lib.endswith(".so") + ] + self.install_libs(arch, *shared_libs) + + def postbuild_arch(self, arch): + '''Run any post-build tasks for the Recipe. By default, this checks if + any postbuild_archname methods exist for the archname of the + current architecture, and runs them if so. + ''' + postbuild = "postbuild_{}".format(arch.arch) + if hasattr(self, postbuild): + getattr(self, postbuild)() + + if self.need_stl_shared: + self.install_stl_lib(arch) + + def prepare_build_dir(self, arch): + '''Copies the recipe data into a build dir for the given arch. By + default, this unpacks a downloaded recipe. You should override + it (or use a Recipe subclass with different behaviour) if you + want to do something else. + ''' + self.unpack(arch) + + def clean_build(self, arch=None): + '''Deletes all the build information of the recipe. + + If arch is not None, only this arch dir is deleted. Otherwise + (the default) all builds for all archs are deleted. + + By default, this just deletes the main build dir. If the + recipe has e.g. object files biglinked, or .so files stored + elsewhere, you should override this method. + + This method is intended for testing purposes, it may have + strange results. Rebuild everything if this seems to happen. + + ''' + if arch is None: + base_dir = join(self.ctx.build_dir, 'other_builds', self.name) + else: + base_dir = self.get_build_container_dir(arch) + dirs = glob.glob(base_dir + '-*') + if exists(base_dir): + dirs.append(base_dir) + if not dirs: + warning('Attempted to clean build for {} but found no existing ' + 'build dirs'.format(self.name)) + + for directory in dirs: + rmdir(directory) + + # Delete any Python distributions to ensure the recipe build + # doesn't persist in site-packages + rmdir(self.ctx.python_installs_dir) + + def install_libs(self, arch, *libs): + libs_dir = self.ctx.get_libs_dir(arch.arch) + if not libs: + warning('install_libs called with no libraries to install!') + return + args = libs + (libs_dir,) + shprint(sh.cp, *args) + + def has_libs(self, arch, *libs): + return all(map(lambda lib: self.ctx.has_lib(arch.arch, lib), libs)) + + def get_libraries(self, arch_name, in_context=False): + """Return the full path of the library depending on the architecture. + Per default, the build library path it will be returned, unless + `get_libraries` has been called with kwarg `in_context` set to + True. + + .. 
note:: this method should be used for library recipes only + """ + recipe_libs = set() + if not self.built_libraries: + return recipe_libs + for lib, rel_path in self.built_libraries.items(): + if not in_context: + abs_path = join(self.get_build_dir(arch_name), rel_path, lib) + if rel_path in {".", "", None}: + abs_path = join(self.get_build_dir(arch_name), lib) + else: + abs_path = join(self.ctx.get_libs_dir(arch_name), lib) + recipe_libs.add(abs_path) + return recipe_libs + + @classmethod + def recipe_dirs(cls, ctx): + recipe_dirs = [] + if ctx.local_recipes is not None: + recipe_dirs.append(realpath(ctx.local_recipes)) + if ctx.storage_dir: + recipe_dirs.append(join(ctx.storage_dir, 'recipes')) + recipe_dirs.append(join(ctx.root_dir, "recipes")) + return recipe_dirs + + @classmethod + def list_recipes(cls, ctx): + forbidden_dirs = ('__pycache__', ) + for recipes_dir in cls.recipe_dirs(ctx): + if recipes_dir and exists(recipes_dir): + for name in listdir(recipes_dir): + if name in forbidden_dirs: + continue + fn = join(recipes_dir, name) + if isdir(fn): + yield name + + @classmethod + def get_recipe(cls, name, ctx): + '''Returns the Recipe with the given name, if it exists.''' + name = name.lower() + if not hasattr(cls, "recipes"): + cls.recipes = {} + if name in cls.recipes: + return cls.recipes[name] + + recipe_file = None + for recipes_dir in cls.recipe_dirs(ctx): + if not exists(recipes_dir): + continue + # Find matching folder (may differ in case): + for subfolder in listdir(recipes_dir): + if subfolder.lower() == name: + recipe_file = join(recipes_dir, subfolder, '__init__.py') + if exists(recipe_file): + name = subfolder # adapt to actual spelling + break + recipe_file = None + if recipe_file is not None: + break + + else: + raise ValueError('Recipe does not exist: {}'.format(name)) + + mod = import_recipe('pythonforandroid.recipes.{}'.format(name), recipe_file) + if len(logger.handlers) > 1: + logger.removeHandler(logger.handlers[1]) + recipe = mod.recipe + recipe.ctx = ctx + cls.recipes[name.lower()] = recipe + return recipe + + +class IncludedFilesBehaviour(object): + '''Recipe mixin class that will automatically unpack files included in + the recipe directory.''' + src_filename = None + + def prepare_build_dir(self, arch): + if self.src_filename is None: + raise BuildInterruptingException( + 'IncludedFilesBehaviour failed: no src_filename specified') + rmdir(self.get_build_dir(arch)) + shprint(sh.cp, '-a', join(self.get_recipe_dir(), self.src_filename), + self.get_build_dir(arch)) + + +class BootstrapNDKRecipe(Recipe): + '''A recipe class for recipes built in an Android project jni dir with + an Android.mk. These are not cached separatly, but built in the + bootstrap's own building directory. + + To build an NDK project which is not part of the bootstrap, see + :class:`~pythonforandroid.recipe.NDKRecipe`. + + To link with python, call the method :meth:`get_recipe_env` + with the kwarg *with_python=True*. 
+ ''' + + dir_name = None # The name of the recipe build folder in the jni dir + + def get_build_container_dir(self, arch): + return self.get_jni_dir() + + def get_build_dir(self, arch): + if self.dir_name is None: + raise ValueError('{} recipe doesn\'t define a dir_name, but ' + 'this is necessary'.format(self.name)) + return join(self.get_build_container_dir(arch), self.dir_name) + + def get_jni_dir(self): + return join(self.ctx.bootstrap.build_dir, 'jni') + + def get_recipe_env(self, arch=None, with_flags_in_cc=True, with_python=False): + env = super().get_recipe_env(arch, with_flags_in_cc) + if not with_python: + return env + + env['PYTHON_INCLUDE_ROOT'] = self.ctx.python_recipe.include_root(arch.arch) + env['PYTHON_LINK_ROOT'] = self.ctx.python_recipe.link_root(arch.arch) + env['EXTRA_LDLIBS'] = ' -lpython{}'.format( + self.ctx.python_recipe.link_version) + return env + + +class NDKRecipe(Recipe): + '''A recipe class for any NDK project not included in the bootstrap.''' + + generated_libraries = [] + + def should_build(self, arch): + lib_dir = self.get_lib_dir(arch) + + for lib in self.generated_libraries: + if not exists(join(lib_dir, lib)): + return True + + return False + + def get_lib_dir(self, arch): + return join(self.get_build_dir(arch.arch), 'obj', 'local', arch.arch) + + def get_jni_dir(self, arch): + return join(self.get_build_dir(arch.arch), 'jni') + + def build_arch(self, arch, *extra_args): + super().build_arch(arch) + + env = self.get_recipe_env(arch) + with current_directory(self.get_build_dir(arch.arch)): + shprint( + sh.Command(join(self.ctx.ndk_dir, "ndk-build")), + 'V=1', + 'NDK_DEBUG=' + ("1" if self.ctx.build_as_debuggable else "0"), + 'APP_PLATFORM=android-' + str(self.ctx.ndk_api), + 'APP_ABI=' + arch.arch, + *extra_args, _env=env + ) + + +class PythonRecipe(Recipe): + site_packages_name = None + '''The name of the module's folder when installed in the Python + site-packages (e.g. for pyjnius it is 'jnius')''' + + call_hostpython_via_targetpython = True + '''If True, tries to install the module using the hostpython binary + copied to the target (normally arm) python build dir. However, this + will fail if the module tries to import e.g. _io.so. Set this to False + to call hostpython from its own build dir, installing the module in + the right place via arguments to setup.py. However, this may not set + the environment correctly and so False is not the default.''' + + install_in_hostpython = False + '''If True, additionally installs the module in the hostpython build + dir. This will make it available to other recipes if + call_hostpython_via_targetpython is False. + ''' + + install_in_targetpython = True + '''If True, installs the module in the targetpython installation dir. + This is almost always what you want to do.''' + + setup_extra_args = [] + '''List of extra arguments to pass to setup.py''' + + depends = ['python3'] + ''' + .. note:: it's important to keep this depends as a class attribute outside + `__init__` because sometimes we only initialize the class, so the + `__init__` call won't be called and the deps would be missing + (which breaks the dependency graph computation) + + .. 
warning:: don't forget to call `super().__init__()` in any recipe's + `__init__`, or otherwise it may not be ensured that it depends + on python2 or python3 which can break the dependency graph + ''' + + hostpython_prerequisites = [] + '''List of hostpython packages required to build a recipe''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if 'python3' not in self.depends: + # We ensure here that the recipe depends on python even it overrode + # `depends`. We only do this if it doesn't already depend on any + # python, since some recipes intentionally don't depend on/work + # with all python variants + depends = self.depends + depends.append('python3') + depends = list(set(depends)) + self.depends = depends + + def clean_build(self, arch=None): + super().clean_build(arch=arch) + name = self.folder_name + python_install_dirs = glob.glob(join(self.ctx.python_installs_dir, '*')) + for python_install in python_install_dirs: + site_packages_dir = glob.glob(join(python_install, 'lib', 'python*', + 'site-packages')) + if site_packages_dir: + build_dir = join(site_packages_dir[0], name) + if exists(build_dir): + info('Deleted {}'.format(build_dir)) + rmdir(build_dir) + + @property + def real_hostpython_location(self): + host_name = 'host{}'.format(self.ctx.python_recipe.name) + if host_name == 'hostpython3': + python_recipe = Recipe.get_recipe(host_name, self.ctx) + return python_recipe.python_exe + else: + python_recipe = self.ctx.python_recipe + return 'python{}'.format(python_recipe.version) + + @property + def hostpython_location(self): + if not self.call_hostpython_via_targetpython: + return self.real_hostpython_location + return self.ctx.hostpython + + @property + def folder_name(self): + '''The name of the build folders containing this recipe.''' + name = self.site_packages_name + if name is None: + name = self.name + return name + + def get_recipe_env(self, arch=None, with_flags_in_cc=True): + env = super().get_recipe_env(arch, with_flags_in_cc) + env['PYTHONNOUSERSITE'] = '1' + # Set the LANG, this isn't usually important but is a better default + # as it occasionally matters how Python e.g. 
reads files + env['LANG'] = "en_GB.UTF-8" + # Binaries made by packages installed by pip + env["PATH"] = join(self.hostpython_site_dir, "bin") + ":" + env["PATH"] + + if not self.call_hostpython_via_targetpython: + env['CFLAGS'] += ' -I{}'.format( + self.ctx.python_recipe.include_root(arch.arch) + ) + env['LDFLAGS'] += ' -L{} -lpython{}'.format( + self.ctx.python_recipe.link_root(arch.arch), + self.ctx.python_recipe.link_version, + ) + + hppath = [] + hppath.append(join(dirname(self.hostpython_location), 'Lib')) + hppath.append(join(hppath[0], 'site-packages')) + builddir = join(dirname(self.hostpython_location), 'build') + if exists(builddir): + hppath += [join(builddir, d) for d in listdir(builddir) + if isdir(join(builddir, d))] + if len(hppath) > 0: + if 'PYTHONPATH' in env: + env['PYTHONPATH'] = ':'.join(hppath + [env['PYTHONPATH']]) + else: + env['PYTHONPATH'] = ':'.join(hppath) + return env + + def should_build(self, arch): + name = self.folder_name + if self.ctx.has_package(name, arch): + info('Python package already exists in site-packages') + return False + info('{} apparently isn\'t already in site-packages'.format(name)) + return True + + def build_arch(self, arch): + '''Install the Python module by calling setup.py install with + the target Python dir.''' + self.install_hostpython_prerequisites() + super().build_arch(arch) + self.install_python_package(arch) + + def install_python_package(self, arch, name=None, env=None, is_dir=True): + '''Automate the installation of a Python package (or a cython + package where the cython components are pre-built).''' + # arch = self.filtered_archs[0] # old kivy-ios way + if name is None: + name = self.name + if env is None: + env = self.get_recipe_env(arch) + + info('Installing {} into site-packages'.format(self.name)) + + hostpython = sh.Command(self.hostpython_location) + hpenv = env.copy() + with current_directory(self.get_build_dir(arch.arch)): + shprint(hostpython, 'setup.py', 'install', '-O2', + '--root={}'.format(self.ctx.get_python_install_dir(arch.arch)), + '--install-lib=.', + _env=hpenv, *self.setup_extra_args) + + # If asked, also install in the hostpython build dir + if self.install_in_hostpython: + self.install_hostpython_package(arch) + + def get_hostrecipe_env(self, arch): + env = environ.copy() + env['PYTHONPATH'] = self.hostpython_site_dir + return env + + @property + def hostpython_site_dir(self): + return join(dirname(self.real_hostpython_location), 'Lib', 'site-packages') + + def install_hostpython_package(self, arch): + env = self.get_hostrecipe_env(arch) + real_hostpython = sh.Command(self.real_hostpython_location) + shprint(real_hostpython, 'setup.py', 'install', '-O2', + '--root={}'.format(dirname(self.real_hostpython_location)), + '--install-lib=Lib/site-packages', + _env=env, *self.setup_extra_args) + + @property + def python_major_minor_version(self): + parsed_version = packaging.version.parse(self.ctx.python_recipe.version) + return f"{parsed_version.major}.{parsed_version.minor}" + + def install_hostpython_prerequisites(self, packages=None, force_upgrade=True): + if not packages: + packages = self.hostpython_prerequisites + + if len(packages) == 0: + return + + pip_options = [ + "install", + *packages, + "--target", self.hostpython_site_dir, "--python-version", + self.ctx.python_recipe.version, + # Don't use sources, instead wheels + "--only-binary=:all:", + ] + if force_upgrade: + pip_options.append("--upgrade") + # Use system's pip + shprint(sh.pip, *pip_options) + + def restore_hostpython_prerequisites(self, 
packages): + _packages = [] + for package in packages: + original_version = Recipe.get_recipe(package, self.ctx).version + _packages.append(package + "==" + original_version) + self.install_hostpython_prerequisites(packages=_packages) + + +class CompiledComponentsPythonRecipe(PythonRecipe): + pre_build_ext = False + + build_cmd = 'build_ext' + + def build_arch(self, arch): + '''Build any cython components, then install the Python module by + calling setup.py install with the target Python dir. + ''' + Recipe.build_arch(self, arch) + self.install_hostpython_prerequisites() + self.build_compiled_components(arch) + self.install_python_package(arch) + + def build_compiled_components(self, arch): + info('Building compiled components in {}'.format(self.name)) + + env = self.get_recipe_env(arch) + hostpython = sh.Command(self.hostpython_location) + with current_directory(self.get_build_dir(arch.arch)): + if self.install_in_hostpython: + shprint(hostpython, 'setup.py', 'clean', '--all', _env=env) + shprint(hostpython, 'setup.py', self.build_cmd, '-v', + _env=env, *self.setup_extra_args) + build_dir = glob.glob('build/lib.*')[0] + shprint(sh.find, build_dir, '-name', '"*.o"', '-exec', + env['STRIP'], '{}', ';', _env=env) + + def install_hostpython_package(self, arch): + env = self.get_hostrecipe_env(arch) + self.rebuild_compiled_components(arch, env) + super().install_hostpython_package(arch) + + def rebuild_compiled_components(self, arch, env): + info('Rebuilding compiled components in {}'.format(self.name)) + + hostpython = sh.Command(self.real_hostpython_location) + shprint(hostpython, 'setup.py', 'clean', '--all', _env=env) + shprint(hostpython, 'setup.py', self.build_cmd, '-v', _env=env, + *self.setup_extra_args) + + +class CppCompiledComponentsPythonRecipe(CompiledComponentsPythonRecipe): + """ Extensions that require the cxx-stl """ + call_hostpython_via_targetpython = False + need_stl_shared = True + + +class CythonRecipe(PythonRecipe): + pre_build_ext = False + cythonize = True + cython_args = [] + call_hostpython_via_targetpython = False + + def build_arch(self, arch): + '''Build any cython components, then install the Python module by + calling setup.py install with the target Python dir. 
+ ''' + Recipe.build_arch(self, arch) + self.build_cython_components(arch) + self.install_python_package(arch) + + def build_cython_components(self, arch): + info('Cythonizing anything necessary in {}'.format(self.name)) + + env = self.get_recipe_env(arch) + + with current_directory(self.get_build_dir(arch.arch)): + hostpython = sh.Command(self.ctx.hostpython) + shprint(hostpython, '-c', 'import sys; print(sys.path)', _env=env) + debug('cwd is {}'.format(realpath(curdir))) + info('Trying first build of {} to get cython files: this is ' + 'expected to fail'.format(self.name)) + + manually_cythonise = False + try: + shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env, + *self.setup_extra_args) + except sh.ErrorReturnCode_1: + print() + info('{} first build failed (as expected)'.format(self.name)) + manually_cythonise = True + + if manually_cythonise: + self.cythonize_build(env=env) + shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env, + _tail=20, _critical=True, *self.setup_extra_args) + else: + info('First build appeared to complete correctly, skipping manual' + 'cythonising.') + + if not self.ctx.with_debug_symbols: + self.strip_object_files(arch, env) + + def strip_object_files(self, arch, env, build_dir=None): + if build_dir is None: + build_dir = self.get_build_dir(arch.arch) + with current_directory(build_dir): + info('Stripping object files') + shprint(sh.find, '.', '-iname', '*.so', '-exec', + '/usr/bin/echo', '{}', ';', _env=env) + shprint(sh.find, '.', '-iname', '*.so', '-exec', + env['STRIP'].split(' ')[0], '--strip-unneeded', + # '/usr/bin/strip', '--strip-unneeded', + '{}', ';', _env=env) + + def cythonize_file(self, env, build_dir, filename): + short_filename = filename + if filename.startswith(build_dir): + short_filename = filename[len(build_dir) + 1:] + info(u"Cythonize {}".format(short_filename)) + cyenv = env.copy() + if 'CYTHONPATH' in cyenv: + cyenv['PYTHONPATH'] = cyenv['CYTHONPATH'] + elif 'PYTHONPATH' in cyenv: + del cyenv['PYTHONPATH'] + if 'PYTHONNOUSERSITE' in cyenv: + cyenv.pop('PYTHONNOUSERSITE') + python_command = sh.Command("python{}".format( + self.ctx.python_recipe.major_minor_version_string.split(".")[0] + )) + shprint(python_command, "-c" + "import sys; from Cython.Compiler.Main import setuptools_main; sys.exit(setuptools_main());", + filename, *self.cython_args, _env=cyenv) + + def cythonize_build(self, env, build_dir="."): + if not self.cythonize: + info('Running cython cancelled per recipe setting') + return + info('Running cython where appropriate') + for root, dirnames, filenames in walk("."): + for filename in fnmatch.filter(filenames, "*.pyx"): + self.cythonize_file(env, build_dir, join(root, filename)) + + def get_recipe_env(self, arch, with_flags_in_cc=True): + env = super().get_recipe_env(arch, with_flags_in_cc) + env['LDFLAGS'] = env['LDFLAGS'] + ' -L{} '.format( + self.ctx.get_libs_dir(arch.arch) + + ' -L{} '.format(self.ctx.libs_dir) + + ' -L{}'.format(join(self.ctx.bootstrap.build_dir, 'obj', 'local', + arch.arch))) + + env['LDSHARED'] = env['CC'] + ' -shared' + # shprint(sh.whereis, env['LDSHARED'], _env=env) + env['LIBLINK'] = 'NOTNONE' + if self.ctx.copy_libs: + env['COPYLIBS'] = '1' + + # Every recipe uses its own liblink path, object files are + # collected and biglinked later + liblink_path = join(self.get_build_container_dir(arch.arch), + 'objects_{}'.format(self.name)) + env['LIBLINK_PATH'] = liblink_path + ensure_dir(liblink_path) + + return env + + +class PyProjectRecipe(PythonRecipe): + '''Recipe for projects which 
containes `pyproject.toml`''' + + # Extra args to pass to `python -m build ...` + extra_build_args = [] + call_hostpython_via_targetpython = False + + def get_recipe_env(self, arch, **kwargs): + # Custom hostpython + self.ctx.python_recipe.python_exe = join( + self.ctx.python_recipe.get_build_dir(arch), "android-build", "python3") + env = super().get_recipe_env(arch, **kwargs) + build_dir = self.get_build_dir(arch) + ensure_dir(build_dir) + build_opts = join(build_dir, "build-opts.cfg") + + with open(build_opts, "w") as file: + file.write("[bdist_wheel]\nplat-name={}".format( + self.get_wheel_platform_tag(arch) + )) + file.close() + + env["DIST_EXTRA_CONFIG"] = build_opts + return env + + def get_wheel_platform_tag(self, arch): + return "android_" + { + "armeabi-v7a": "arm", + "arm64-v8a": "aarch64", + "x86_64": "x86_64", + "x86": "i686", + }[arch.arch] + + def install_wheel(self, arch, built_wheels): + _wheel = built_wheels[0] + built_wheel_dir = dirname(_wheel) + # Fix wheel platform tag + wheel_tag = wheel_tags( + _wheel, + platform_tags=self.get_wheel_platform_tag(arch), + remove=True, + ) + selected_wheel = join(built_wheel_dir, wheel_tag) + + _dev_wheel_dir = environ.get("P4A_WHEEL_DIR", False) + if _dev_wheel_dir: + ensure_dir(_dev_wheel_dir) + shprint(sh.cp, selected_wheel, _dev_wheel_dir) + + info(f"Installing built wheel: {wheel_tag}") + destination = self.ctx.get_python_install_dir(arch.arch) + with WheelFile(selected_wheel) as wf: + for zinfo in wf.filelist: + wf.extract(zinfo, destination) + wf.close() + + def build_arch(self, arch): + self.install_hostpython_prerequisites( + packages=["build[virtualenv]", "pip"] + self.hostpython_prerequisites + ) + build_dir = self.get_build_dir(arch.arch) + env = self.get_recipe_env(arch, with_flags_in_cc=True) + # make build dir separatly + sub_build_dir = join(build_dir, "p4a_android_build") + ensure_dir(sub_build_dir) + # copy hostpython to built python to ensure correct selection of libs and includes + shprint(sh.cp, self.real_hostpython_location, self.ctx.python_recipe.python_exe) + + build_args = [ + "-m", + "build", + "--wheel", + "--config-setting", + "builddir={}".format(sub_build_dir), + ] + self.extra_build_args + + built_wheels = [] + with current_directory(build_dir): + shprint( + sh.Command(self.ctx.python_recipe.python_exe), *build_args, _env=env + ) + built_wheels = [realpath(whl) for whl in glob.glob("dist/*.whl")] + self.install_wheel(arch, built_wheels) + + +class MesonRecipe(PyProjectRecipe): + '''Recipe for projects which uses meson as build system''' + + meson_version = "1.4.0" + ninja_version = "1.11.1.1" + + def sanitize_flags(self, *flag_strings): + return " ".join(flag_strings).strip().split(" ") + + def get_recipe_meson_options(self, arch): + env = self.get_recipe_env(arch, with_flags_in_cc=True) + return { + "binaries": { + "c": arch.get_clang_exe(with_target=True), + "cpp": arch.get_clang_exe(with_target=True, plus_plus=True), + "ar": self.ctx.ndk.llvm_ar, + "strip": self.ctx.ndk.llvm_strip, + }, + "built-in options": { + "c_args": self.sanitize_flags(env["CFLAGS"], env["CPPFLAGS"]), + "cpp_args": self.sanitize_flags(env["CXXFLAGS"], env["CPPFLAGS"]), + "c_link_args": self.sanitize_flags(env["LDFLAGS"]), + "cpp_link_args": self.sanitize_flags(env["LDFLAGS"]), + }, + "properties": { + "needs_exe_wrapper": True, + "sys_root": self.ctx.ndk.sysroot + }, + "host_machine": { + "cpu_family": { + "arm64-v8a": "aarch64", + "armeabi-v7a": "arm", + "x86_64": "x86_64", + "x86": "x86" + }[arch.arch], + "cpu": { + 
"arm64-v8a": "aarch64", + "armeabi-v7a": "armv7", + "x86_64": "x86_64", + "x86": "i686" + }[arch.arch], + "endian": "little", + "system": "android", + } + } + + def write_build_options(self, arch): + """Writes python dict to meson config file""" + option_data = "" + build_options = self.get_recipe_meson_options(arch) + for key in build_options.keys(): + data_chunk = "[{}]".format(key) + for subkey in build_options[key].keys(): + value = build_options[key][subkey] + if isinstance(value, int): + value = str(value) + elif isinstance(value, str): + value = "'{}'".format(value) + elif isinstance(value, bool): + value = "true" if value else "false" + elif isinstance(value, list): + value = "['" + "', '".join(value) + "']" + data_chunk += "\n" + subkey + " = " + value + option_data += data_chunk + "\n\n" + return option_data + + def ensure_args(self, *args): + for arg in args: + if arg not in self.extra_build_args: + self.extra_build_args.append(arg) + + def build_arch(self, arch): + cross_file = join("/tmp", "android.meson.cross") + info("Writing cross file at: {}".format(cross_file)) + # write cross config file + with open(cross_file, "w") as file: + file.write(self.write_build_options(arch)) + file.close() + # set cross file + self.ensure_args('-Csetup-args=--cross-file', '-Csetup-args={}'.format(cross_file)) + # ensure ninja and meson + for dep in [ + "ninja=={}".format(self.ninja_version), + "meson=={}".format(self.meson_version), + ]: + if dep not in self.hostpython_prerequisites: + self.hostpython_prerequisites.append(dep) + super().build_arch(arch) + + +class RustCompiledComponentsRecipe(PyProjectRecipe): + # Rust toolchain codes + # https://doc.rust-lang.org/nightly/rustc/platform-support.html + RUST_ARCH_CODES = { + "arm64-v8a": "aarch64-linux-android", + "armeabi-v7a": "armv7-linux-androideabi", + "x86_64": "x86_64-linux-android", + "x86": "i686-linux-android", + } + + call_hostpython_via_targetpython = False + + def get_recipe_env(self, arch, **kwargs): + env = super().get_recipe_env(arch, **kwargs) + + # Set rust build target + build_target = self.RUST_ARCH_CODES[arch.arch] + cargo_linker_name = "CARGO_TARGET_{}_LINKER".format( + build_target.upper().replace("-", "_") + ) + env["CARGO_BUILD_TARGET"] = build_target + env[cargo_linker_name] = join( + self.ctx.ndk.llvm_prebuilt_dir, + "bin", + "{}{}-clang".format( + # NDK's Clang format + build_target.replace("7", "7a") + if build_target.startswith("armv7") + else build_target, + self.ctx.ndk_api, + ), + ) + realpython_dir = self.ctx.python_recipe.get_build_dir(arch.arch) + + env["RUSTFLAGS"] = "-Clink-args=-L{} -L{}".format( + self.ctx.get_libs_dir(arch.arch), join(realpython_dir, "android-build") + ) + + env["PYO3_CROSS_LIB_DIR"] = realpath(glob.glob(join( + realpython_dir, "android-build", "build", + "lib.linux-*-{}/".format(self.python_major_minor_version), + ))[0]) + + info_main("Ensuring rust build toolchain") + shprint(sh.rustup, "target", "add", build_target) + + # Add host python to PATH + env["PATH"] = ("{hostpython_dir}:{old_path}").format( + hostpython_dir=Recipe.get_recipe( + "hostpython3", self.ctx + ).get_path_to_python(), + old_path=env["PATH"], + ) + return env + + def check_host_deps(self): + if not hasattr(sh, "rustup"): + error( + "`rustup` was not found on host system." + "Please install it using :" + "\n`curl https://sh.rustup.rs -sSf | sh`\n" + ) + exit(1) + + def build_arch(self, arch): + self.check_host_deps() + super().build_arch(arch) + + +class TargetPythonRecipe(Recipe): + '''Class for target python recipes. 
Sets ctx.python_recipe to point to + itself, so as to know later what kind of Python was built or used.''' + + def __init__(self, *args, **kwargs): + self._ctx = None + super().__init__(*args, **kwargs) + + def prebuild_arch(self, arch): + super().prebuild_arch(arch) + self.ctx.python_recipe = self + + def include_root(self, arch): + '''The root directory from which to include headers.''' + raise NotImplementedError('Not implemented in TargetPythonRecipe') + + def link_root(self): + raise NotImplementedError('Not implemented in TargetPythonRecipe') + + @property + def major_minor_version_string(self): + parsed_version = packaging.version.parse(self.version) + return f"{parsed_version.major}.{parsed_version.minor}" + + def create_python_bundle(self, dirn, arch): + """ + Create a packaged python bundle in the target directory, by + copying all the modules and standard library to the right + place. + """ + raise NotImplementedError('{} does not implement create_python_bundle'.format(self)) + + def reduce_object_file_names(self, dirn): + """Recursively renames all files named XXX.cpython-...-linux-gnu.so" + to "XXX.so", i.e. removing the erroneous architecture name + coming from the local system. + """ + py_so_files = shprint(sh.find, dirn, '-iname', '*.so') + filens = py_so_files.stdout.decode('utf-8').split('\n')[:-1] + for filen in filens: + file_dirname, file_basename = split(filen) + parts = file_basename.split('.') + if len(parts) <= 2: + continue + # PySide6 libraries end with .abi3.so + if parts[1] == "abi3": + continue + move(filen, join(file_dirname, parts[0] + '.so')) + + +def algsum(alg, filen): + '''Calculate the digest of a file. + ''' + with open(filen, 'rb') as fileh: + digest = getattr(hashlib, alg)(fileh.read()) + + return digest.hexdigest() + + +class FFPyPlayerRecipe(PyProjectRecipe): + version = 'v4.5.1' + url = 'https://github.com/matham/ffpyplayer/archive/{version}.zip' + depends = ['python3', 'sdl2', 'ffmpeg'] + patches = ["setup.py.patch"] + opt_depends = ['openssl', 'ffpyplayer_codecs'] + + def get_recipe_env(self, arch, with_flags_in_cc=True): + env = super().get_recipe_env(arch) + + build_dir = Recipe.get_recipe('ffmpeg', self.ctx).get_build_dir(arch.arch) + env["FFMPEG_INCLUDE_DIR"] = join(build_dir, "include") + env["FFMPEG_LIB_DIR"] = join(build_dir, "lib") + + env["SDL_INCLUDE_DIR"] = join(self.ctx.bootstrap.build_dir, 'jni', 'SDL', 'include') + env["SDL_LIB_DIR"] = join(self.ctx.bootstrap.build_dir, 'libs', arch.arch) + + env["USE_SDL2_MIXER"] = '1' + + # ffpyplayer does not allow to pass more than one include dir for sdl2_mixer (and ATM is + # not needed), so we only pass the first one. + sdl2_mixer_recipe = self.get_recipe('sdl2_mixer', self.ctx) + env["SDL2_MIXER_INCLUDE_DIR"] = sdl2_mixer_recipe.get_include_dirs(arch)[0] + + # NDKPLATFORM and LIBLINK are our switches for detecting Android platform, so can't be empty + # FIXME: We may want to introduce a cleaner approach to this? + env['NDKPLATFORM'] = "NOTNONE" + env['LIBLINK'] = 'NOTNONE' + + # ffmpeg recipe enables GPL components only if ffpyplayer_codecs recipe used. + # Therefor we need to disable libpostproc if skipped. 
+        if 'ffpyplayer_codecs' not in self.ctx.recipe_build_order:
+            env["CONFIG_POSTPROC"] = '0'
+
+        return env
+
+
+recipe = FFPyPlayerRecipe()
\ No newline at end of file
diff --git a/recipes/ffpyplayer/setup.py.patch b/recipes/ffpyplayer/setup.py.patch
new file mode 100644
index 0000000..6a7d42f
--- /dev/null
+++ b/recipes/ffpyplayer/setup.py.patch
@@ -0,0 +1,15 @@
+--- ffpyplayer/setup.py 2024-06-02 11:10:49.691183467 +0530
++++ ffpyplayer.mod/setup.py 2024-06-02 11:20:16.220966873 +0530
+@@ -27,12 +27,6 @@
+ # This sets whether or not Cython gets added to setup_requires.
+ declare_cython = False
+
+-if platform in ('ios', 'android'):
+-    # NEVER use or declare cython on these platforms
+-    print('Not using cython on %s' % platform)
+-    can_use_cython = False
+-else:
+-    declare_cython = True
+
+ src_path = build_path = dirname(__file__)
+ print(f'Source/build path: {src_path}')
\ No newline at end of file
diff --git a/recipes/libopus/__init__.py b/recipes/libopus/__init__.py
new file mode 100644
index 0000000..1eb4977
--- /dev/null
+++ b/recipes/libopus/__init__.py
@@ -0,0 +1,22 @@
+from pythonforandroid.recipe import Recipe
+from pythonforandroid.toolchain import current_directory, shprint
+import sh
+
+
+class OpusRecipe(Recipe):
+    version = '1.5.2'
+    url = "https://downloads.xiph.org/releases/opus/opus-{version}.tar.gz"
+    built_libraries = {'libopus.so': '.libs'}
+
+    def build_arch(self, arch):
+        with current_directory(self.get_build_dir(arch.arch)):
+            env = self.get_recipe_env(arch)
+            flags = [
+                '--host=' + arch.command_prefix,
+            ]
+            configure = sh.Command('./configure')
+            shprint(configure, *flags, _env=env)
+            shprint(sh.make, _env=env)
+
+
+recipe = OpusRecipe()
diff --git a/recipes/mffmpeg/__init__.py b/recipes/mffmpeg/__init__.py
new file mode 100644
index 0000000..a51a246
--- /dev/null
+++ b/recipes/mffmpeg/__init__.py
@@ -0,0 +1,152 @@
+from pythonforandroid.toolchain import Recipe, current_directory, shprint
+from os.path import exists, join, realpath
+import sh
+
+
+class FFMpegRecipe(Recipe):
+    version = 'n4.3.1'
+    # Moved to github.com instead of ffmpeg.org to improve download speed
+    url = 'https://github.com/FFmpeg/FFmpeg/archive/{version}.zip'
+    depends = ['sdl2']  # Need this to build correct recipe order
+    opt_depends = ['openssl', 'ffpyplayer_codecs']
+    patches = ['patches/configure.patch']
+
+    def should_build(self, arch):
+        build_dir = self.get_build_dir(arch.arch)
+        return not exists(join(build_dir, 'lib', 'libavcodec.so'))
+
+    def prebuild_arch(self, arch):
+        self.apply_patches(arch)
+
+    def get_recipe_env(self, arch):
+        env = super().get_recipe_env(arch)
+        env['NDK'] = self.ctx.ndk_dir
+        return env
+
+    def build_arch(self, arch):
+        with current_directory(self.get_build_dir(arch.arch)):
+            env = arch.get_env()
+
+            # flags = ['--disable-everything']
+            flags = []
+            cflags = []
+            ldflags = []
+
+            if 'openssl' in self.ctx.recipe_build_order:
+                flags += [
+                    '--enable-openssl',
+                    '--enable-nonfree',
+                    '--enable-protocol=https,tls_openssl',
+                ]
+                build_dir = Recipe.get_recipe(
+                    'openssl', self.ctx).get_build_dir(arch.arch)
+                cflags += ['-I' + build_dir + '/include/',
+                           '-DOPENSSL_API_COMPAT=0x10002000L']
+                ldflags += ['-L' + build_dir]
+
+            if 'ffpyplayer_codecs' in self.ctx.recipe_build_order:
+                # Enable GPL
+                flags += ['--enable-gpl']
+
+                # libx264
+                flags += ['--enable-libx264']
+                build_dir = Recipe.get_recipe(
+                    'libx264', self.ctx).get_build_dir(arch.arch)
+                cflags += ['-I' + build_dir + '/include/']
+                ldflags += ['-lx264', '-L' + build_dir + '/lib/']
+
+                # libshine
+
flags += ['--enable-libshine'] + build_dir = Recipe.get_recipe('libshine', self.ctx).get_build_dir(arch.arch) + cflags += ['-I' + build_dir + '/include/'] + ldflags += ['-lshine', '-L' + build_dir + '/lib/'] + ldflags += ['-lm'] + + # libvpx + flags += ['--enable-libvpx'] + build_dir = Recipe.get_recipe( + 'libvpx', self.ctx).get_build_dir(arch.arch) + cflags += ['-I' + build_dir + '/include/'] + ldflags += ['-lvpx', '-L' + build_dir + '/lib/'] + + # Enable all codecs: + flags += [ + '--enable-parsers', + '--enable-decoders', + '--enable-encoders', + '--enable-muxers', + '--enable-demuxers', + ] + else: + # Enable codecs only for .mp4: + flags += [ + '--enable-parser=aac,ac3,h261,h264,mpegaudio,mpeg4video,mpegvideo,vc1', + '--enable-decoder=aac,h264,mpeg4,mpegvideo', + '--enable-muxer=h264,mov,mp4,mpeg2video', + '--enable-demuxer=aac,h264,m4v,mov,mpegvideo,vc1,rtsp', + ] + + # needed to prevent _ffmpeg.so: version node not found for symbol av_init_packet@LIBAVFORMAT_52 + # /usr/bin/ld: failed to set dynamic section sizes: Bad value + flags += [ + '--disable-symver', + ] + + # disable binaries / doc + flags += [ + # '--disable-programs', + '--disable-doc', + ] + + # other flags: + flags += [ + '--enable-filter=aresample,resample,crop,adelay,volume,scale', + '--enable-protocol=file,http,hls,udp,tcp', + '--enable-small', + '--enable-hwaccels', + '--enable-pic', + '--disable-static', + '--disable-debug', + '--enable-shared', + ] + + if 'arm64' in arch.arch: + arch_flag = 'aarch64' + elif 'x86' in arch.arch: + arch_flag = 'x86' + flags += ['--disable-asm'] + else: + arch_flag = 'arm' + + # android: + flags += [ + '--target-os=android', + '--enable-cross-compile', + '--cross-prefix={}-'.format(arch.target), + '--arch={}'.format(arch_flag), + '--strip={}'.format(self.ctx.ndk.llvm_strip), + '--sysroot={}'.format(self.ctx.ndk.sysroot), + '--enable-neon', + '--prefix={}'.format(realpath('.')), + ] + + if arch_flag == 'arm': + cflags += [ + '-mfpu=vfpv3-d16', + '-mfloat-abi=softfp', + '-fPIC', + ] + + env['CFLAGS'] += ' ' + ' '.join(cflags) + env['LDFLAGS'] += ' ' + ' '.join(ldflags) + + configure = sh.Command('./configure') + shprint(configure, *flags, _env=env) + shprint(sh.make, '-j4', _env=env) + shprint(sh.make, 'install', _env=env) + # copy libs: + sh.cp('-a', sh.glob('./lib/lib*.so'), + self.ctx.get_libs_dir(arch.arch)) + + +recipe = FFMpegRecipe() diff --git a/recipes/mffmpeg/patches/configure.patch b/recipes/mffmpeg/patches/configure.patch new file mode 100644 index 0000000..cacf029 --- /dev/null +++ b/recipes/mffmpeg/patches/configure.patch @@ -0,0 +1,11 @@ +--- ./configure 2020-10-11 19:12:16.759760904 +0200 ++++ ./configure.patch 2020-10-11 19:15:49.059533563 +0200 +@@ -6361,7 +6361,7 @@ + enabled librsvg && require_pkg_config librsvg librsvg-2.0 librsvg-2.0/librsvg/rsvg.h rsvg_handle_render_cairo + enabled librtmp && require_pkg_config librtmp librtmp librtmp/rtmp.h RTMP_Socket + enabled librubberband && require_pkg_config librubberband "rubberband >= 1.8.1" rubberband/rubberband-c.h rubberband_new -lstdc++ && append librubberband_extralibs "-lstdc++" +-enabled libshine && require_pkg_config libshine shine shine/layer3.h shine_encode_buffer ++enabled libshine && require "shine" shine/layer3.h shine_encode_buffer -lshine -lm + enabled libsmbclient && { check_pkg_config libsmbclient smbclient libsmbclient.h smbc_init || + require libsmbclient libsmbclient.h smbc_init -lsmbclient; } + enabled libsnappy && require libsnappy snappy-c.h snappy_compress -lsnappy -lstdc++ \ No newline at end of 
file diff --git a/recipes/numpy/__init__.py b/recipes/numpy/__init__.py new file mode 100644 index 0000000..55a0279 --- /dev/null +++ b/recipes/numpy/__init__.py @@ -0,0 +1,75 @@ +from pythonforandroid.recipe import CompiledComponentsPythonRecipe +from pythonforandroid.logger import shprint, info +from pythonforandroid.util import current_directory +from multiprocessing import cpu_count +from os.path import join +import glob +import sh +import shutil + + +class NumpyRecipe(CompiledComponentsPythonRecipe): + + version = '1.22.3' + url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip' + site_packages_name = 'numpy' + depends = ['setuptools', 'cython'] + install_in_hostpython = True + call_hostpython_via_targetpython = False + + patches = [ + join("patches", "remove-default-paths.patch"), + join("patches", "add_libm_explicitly_to_build.patch"), + join("patches", "ranlib.patch"), + ] + + def get_recipe_env(self, arch=None, with_flags_in_cc=True): + env = super().get_recipe_env(arch, with_flags_in_cc) + + # _PYTHON_HOST_PLATFORM declares that we're cross-compiling + # and avoids issues when building on macOS for Android targets. + env["_PYTHON_HOST_PLATFORM"] = arch.command_prefix + + # NPY_DISABLE_SVML=1 allows numpy to build for non-AVX512 CPUs + # See: https://github.com/numpy/numpy/issues/21196 + env["NPY_DISABLE_SVML"] = "1" + + return env + + def _build_compiled_components(self, arch): + info('Building compiled components in {}'.format(self.name)) + + env = self.get_recipe_env(arch) + with current_directory(self.get_build_dir(arch.arch)): + hostpython = sh.Command(self.hostpython_location) + shprint(hostpython, 'setup.py', self.build_cmd, '-v', + _env=env, *self.setup_extra_args) + build_dir = glob.glob('build/lib.*')[0] + shprint(sh.find, build_dir, '-name', '"*.o"', '-exec', + env['STRIP'], '{}', ';', _env=env) + + def _rebuild_compiled_components(self, arch, env): + info('Rebuilding compiled components in {}'.format(self.name)) + + hostpython = sh.Command(self.real_hostpython_location) + shprint(hostpython, 'setup.py', 'clean', '--all', '--force', _env=env) + shprint(hostpython, 'setup.py', self.build_cmd, '-v', _env=env, + *self.setup_extra_args) + + def build_compiled_components(self, arch): + self.setup_extra_args = ['-j', str(cpu_count())] + self._build_compiled_components(arch) + self.setup_extra_args = [] + + def rebuild_compiled_components(self, arch, env): + self.setup_extra_args = ['-j', str(cpu_count())] + self._rebuild_compiled_components(arch, env) + self.setup_extra_args = [] + + def get_hostrecipe_env(self, arch): + env = super().get_hostrecipe_env(arch) + env['RANLIB'] = shutil.which('ranlib') + return env + + +recipe = NumpyRecipe() diff --git a/recipes/numpy/patches/add_libm_explicitly_to_build.patch b/recipes/numpy/patches/add_libm_explicitly_to_build.patch new file mode 100644 index 0000000..f9ba9e9 --- /dev/null +++ b/recipes/numpy/patches/add_libm_explicitly_to_build.patch @@ -0,0 +1,20 @@ +diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py +index 66c07c9..d34bd93 100644 +--- a/numpy/linalg/setup.py ++++ b/numpy/linalg/setup.py +@@ -46,6 +46,7 @@ def configuration(parent_package='', top_path=None): + sources=['lapack_litemodule.c', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + extra_info=lapack_info, ++ libraries=['m'], + ) + + # umath_linalg module +@@ -54,7 +54,7 @@ def configuration(parent_package='', top_path=None): + sources=['umath_linalg.c.src', get_lapack_lite_sources], + depends=['lapack_lite/f2c.h'], + 
extra_info=lapack_info, +- libraries=['npymath'], ++ libraries=['npymath', 'm'], + ) + return config diff --git a/recipes/numpy/patches/ranlib.patch b/recipes/numpy/patches/ranlib.patch new file mode 100644 index 0000000..c0b5dad --- /dev/null +++ b/recipes/numpy/patches/ranlib.patch @@ -0,0 +1,11 @@ +diff -Naur numpy.orig/numpy/distutils/unixccompiler.py numpy/numpy/distutils/unixccompiler.py +--- numpy.orig/numpy/distutils/unixccompiler.py 2022-05-28 10:22:10.000000000 +0200 ++++ numpy/numpy/distutils/unixccompiler.py 2022-05-28 10:22:24.000000000 +0200 +@@ -124,6 +124,7 @@ + # platform intelligence here to skip ranlib if it's not + # needed -- or maybe Python's configure script took care of + # it for us, hence the check for leading colon. ++ self.ranlib = [os.environ.get('RANLIB')] + if self.ranlib: + display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), + output_filename) diff --git a/recipes/numpy/patches/remove-default-paths.patch b/recipes/numpy/patches/remove-default-paths.patch new file mode 100644 index 0000000..3581f0f --- /dev/null +++ b/recipes/numpy/patches/remove-default-paths.patch @@ -0,0 +1,28 @@ +diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py +index fc7018a..7b514bc 100644 +--- a/numpy/distutils/system_info.py ++++ b/numpy/distutils/system_info.py +@@ -340,10 +340,10 @@ if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: + default_include_dirs.append(os.path.join(sys.prefix, 'include')) + default_src_dirs.append(os.path.join(sys.prefix, 'src')) + +-default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] +-default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] +-default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] +-default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] ++default_lib_dirs = [] #[_m for _m in default_lib_dirs if os.path.isdir(_m)] ++default_runtime_dirs =[] # [_m for _m in default_runtime_dirs if os.path.isdir(_m)] ++default_include_dirs =[] # [_m for _m in default_include_dirs if os.path.isdir(_m)] ++default_src_dirs =[] # [_m for _m in default_src_dirs if os.path.isdir(_m)] + + so_ext = get_shared_lib_extension() + +@@ -814,7 +814,7 @@ class system_info(object): + path = self.get_paths(self.section, key) + if path == ['']: + path = [] +- return path ++ return [] + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) diff --git a/recipes/opusfile/__init__.py b/recipes/opusfile/__init__.py new file mode 100644 index 0000000..27d0f29 --- /dev/null +++ b/recipes/opusfile/__init__.py @@ -0,0 +1,46 @@ +from pythonforandroid.recipe import Recipe +from pythonforandroid.toolchain import current_directory, shprint +import sh +import os +import time + + +class OpusFileRecipe(Recipe): + version = "0.12" + url = "https://downloads.xiph.org/releases/opus/opusfile-{version}.tar.gz" + depends = ['libogg'] + built_libraries = {'libopusfile.so': '.libs'} + + def build_arch(self, arch): + with current_directory(self.get_build_dir(arch.arch)): + env = self.get_recipe_env(arch) + flags = [ + "--host=" + arch.command_prefix, + "--disable-http", + "--disable-examples", + "--disable-doc", + "--disable-largefile", + ] + + cwd = os.getcwd() + ogg_include_path = cwd.replace("opusfile", "libogg") + env["CPPFLAGS"] += f" -I{ogg_include_path}/include" + + # libogg_recipe = Recipe.get_recipe('libogg', self.ctx) + # env['CFLAGS'] += libogg_recipe.include_flags(arch) + + # openssl_recipe = Recipe.get_recipe('openssl', 
self.ctx) + # env['CFLAGS'] += openssl_recipe.include_flags(arch) + # env['LDFLAGS'] += openssl_recipe.link_dirs_flags(arch) + # env['LIBS'] = openssl_recipe.link_libs_flags() + + from rich.pretty import pprint + pprint(env) + time.sleep(5) + + configure = sh.Command('./configure') + shprint(configure, *flags, _env=env) + shprint(sh.make, _env=env) + + +recipe = OpusFileRecipe() diff --git a/recipes/pycodec2/__init__.py b/recipes/pycodec2/__init__.py new file mode 100644 index 0000000..6ad05bd --- /dev/null +++ b/recipes/pycodec2/__init__.py @@ -0,0 +1,57 @@ +from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour +from pythonforandroid.toolchain import current_directory, shprint +from os.path import join +import sh + +# class PyCodec2Recipe(IncludedFilesBehaviour, CythonRecipe): +class PyCodec2Recipe(CythonRecipe): + url = "https://github.com/markqvist/pycodec2/archive/refs/heads/main.zip" + # src_filename = "../../../pycodec2" + depends = ["setuptools", "numpy", "Cython", "codec2"] + call_hostpython_via_targetpython = False + + def get_recipe_env(self, arch, with_flags_in_cc=True): + """ + Adds codec2 recipe to include and library path. + """ + env = super().get_recipe_env(arch, with_flags_in_cc) + + codec2_recipe = self.get_recipe('codec2', self.ctx) + env['CFLAGS'] += codec2_recipe.include_flags(arch) +" -l:libcodec2.so" + env['LDFLAGS'] += ' -L{}'.format(self.ctx.get_libs_dir(arch.arch)) + env['LDFLAGS'] += ' -L{}'.format(self.ctx.libs_dir) + env['LDFLAGS'] += codec2_recipe.link_dirs_flags(arch) + + return env + + def build_arch(self, arch): + super().build_arch(arch) + with current_directory(self.get_build_dir(arch.arch)): + pass + # print(arch.arch) + # print(arch) + # shprint(sh.Command("pwd")) + # shprint(sh.Command("ls")) + + # pe_args = ["--replace-needed", "libcodec2.so.1.2", "libcodec2.so", "build/lib.linux-x86_64-3.11/pycodec2/pycodec2.cpython-311-x86_64-linux-gnu.so"] + # shprint(sh.Command("patchelf"), *pe_args) + + # pe_args = ["--replace-needed", "libcodec2.so.1.2", "libcodec2.so", f"../../../../python-installs/sideband/{arch.arch}/pycodec2/pycodec2.cpython-311-x86_64-linux-gnu.so"] + # shprint(sh.Command("patchelf"), *pe_args) + + # ../../../../python-installs/sideband/armeabi-v7a/pycodec2/pycodec2.cpython-311-x86_64-linux-gnu.so + # sbapp/.buildozer/android/platform/build-arm64-v8a_armeabi-v7a/build/other_builds/pycodec2/armeabi-v7a__ndk_target_24/pycodec2/build/lib.linux-x86_64-3.11/pycodec2/pycodec2.cpython-311-x86_64-linux-gnu.so + # sbapp/.buildozer/android/platform/build-arm64-v8a_armeabi-v7a/build/python-installs/sideband/armeabi-v7a/pycodec2/pycodec2.cpython-311-x86_64-linux-gnu.so + # print("=========================") + # input() + + + def postbuild_arch(self, arch): + super().postbuild_arch(arch) + +recipe = PyCodec2Recipe() + +# patchelf --replace-needed libcodec2.so.1.2 libcodec2.so sbapp/.buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/_python_bundle__arm64-v8a/_python_bundle/site-packages/pycodec2/pycodec2.so +# patchelf --replace-needed libcodec2.so.1.2 libcodec2.so sbapp/.buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/_python_bundle__armeabi-v7a/_python_bundle/site-packages/pycodec2/pycodec2.so + +# patchelf --replace-needed libcodec2.so.1.2 libcodec2.so sbapp/.buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/_python_bundle__arm64-v8a/_python_bundle/site-packages/pycodec2/pycodec2.so; patchelf --replace-needed libcodec2.so.1.2 libcodec2.so 
sbapp/.buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/_python_bundle__armeabi-v7a/_python_bundle/site-packages/pycodec2/pycodec2.so \ No newline at end of file diff --git a/sbapp/Makefile b/sbapp/Makefile index 0b6b474..ed7691c 100644 --- a/sbapp/Makefile +++ b/sbapp/Makefile @@ -15,7 +15,7 @@ cleanlibs: cleanall: clean cleanlibs -pacthfiles: patchsdl injectxml +pacthfiles: patchsdl injectxml patchpycodec2 patchsdl: # Pach USB HID behaviour @@ -28,6 +28,10 @@ patchsdl: cp patches/PythonService.java .buildozer/android/platform/build-arm64-v8a_armeabi-v7a/build/bootstrap_builds/sdl2/src/main/java/org/kivy/android/PythonService.java cp patches/PythonService.java .buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/src/main/java/org/kivy/android/PythonService.java +patchpycodec2: + patchelf --replace-needed libcodec2.so.1.2 libcodec2.so .buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/_python_bundle__arm64-v8a/_python_bundle/site-packages/pycodec2/pycodec2.so + patchelf --replace-needed libcodec2.so.1.2 libcodec2.so .buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband/_python_bundle__armeabi-v7a/_python_bundle/site-packages/pycodec2/pycodec2.so + injectxml: # mkdir /home/markqvist/.local/lib/python3.11/site-packages/pythonforandroid/bootstraps/sdl2/build/src/main/xml # Inject XML on arm64-v8a @@ -62,7 +66,7 @@ fetchshare: cp ../../dist_archive/lxmf-*-py3-none-any.whl ./share/pkg/ cp ../../dist_archive/nomadnet-*-py3-none-any.whl ./share/pkg/ cp ../../dist_archive/rnsh-*-py3-none-any.whl ./share/pkg/ - cp ../../dist_archive/sbapp-*-py3-none-any.whl ./share/pkg/ +# cp ../../dist_archive/sbapp-*-py3-none-any.whl ./share/pkg/ cp ../../dist_archive/RNode_Firmware_*_Source.zip ./share/pkg/ zip --junk-paths ./share/pkg/example_plugins.zip ../docs/example_plugins/*.py cp -r ../../dist_archive/reticulum.network ./share/mirrors/ diff --git a/sbapp/assets/fonts/DefaultInput.ttf b/sbapp/assets/fonts/DefaultInput.ttf new file mode 100644 index 0000000..6af80b7 Binary files /dev/null and b/sbapp/assets/fonts/DefaultInput.ttf differ diff --git a/sbapp/assets/fonts/EmojiScaled.ttf b/sbapp/assets/fonts/EmojiScaled.ttf new file mode 100644 index 0000000..7f4cb47 Binary files /dev/null and b/sbapp/assets/fonts/EmojiScaled.ttf differ diff --git a/sbapp/assets/fonts/NotoSans-Bold.ttf b/sbapp/assets/fonts/NotoSans-Bold.ttf index d84248e..1b9d109 100644 Binary files a/sbapp/assets/fonts/NotoSans-Bold.ttf and b/sbapp/assets/fonts/NotoSans-Bold.ttf differ diff --git a/sbapp/assets/fonts/NotoSans-BoldItalic.ttf b/sbapp/assets/fonts/NotoSans-BoldItalic.ttf index 3a34c4c..81ab4f6 100644 Binary files a/sbapp/assets/fonts/NotoSans-BoldItalic.ttf and b/sbapp/assets/fonts/NotoSans-BoldItalic.ttf differ diff --git a/sbapp/assets/fonts/NotoSans-Italic.ttf b/sbapp/assets/fonts/NotoSans-Italic.ttf index c40c356..d953762 100644 Binary files a/sbapp/assets/fonts/NotoSans-Italic.ttf and b/sbapp/assets/fonts/NotoSans-Italic.ttf differ diff --git a/sbapp/assets/fonts/NotoSans-Medium.ttf b/sbapp/assets/fonts/NotoSans-Medium.ttf index a799b74..9cfc357 100644 Binary files a/sbapp/assets/fonts/NotoSans-Medium.ttf and b/sbapp/assets/fonts/NotoSans-Medium.ttf differ diff --git a/sbapp/assets/fonts/NotoSans-Regular.ttf b/sbapp/assets/fonts/NotoSans-Regular.ttf index fa4cff5..6af80b7 100644 Binary files a/sbapp/assets/fonts/NotoSans-Regular.ttf and b/sbapp/assets/fonts/NotoSans-Regular.ttf differ diff --git a/sbapp/assets/fonts/NotoSansHebrew-Bold.ttf 
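The `patchpycodec2` Makefile target above rewrites the `DT_NEEDED` entry of the bundled `pycodec2.so` so it links against `libcodec2.so` rather than the host-side `libcodec2.so.1.2`. A rough Python equivalent of that step is sketched below; the helper name and bundle path are illustrative, and only the `patchelf` invocation itself comes from the Makefile.

```python
# Hypothetical helper mirroring the "patchelf --replace-needed" step from the
# Makefile target above. fix_pycodec2_soname() and bundle_root are illustrative.
import glob
import subprocess

def fix_pycodec2_soname(bundle_root):
    pattern = (f"{bundle_root}/_python_bundle__*/_python_bundle/"
               "site-packages/pycodec2/pycodec2.so")
    for so_path in glob.glob(pattern):
        # Rewrite the dependency recorded at link time on the build host
        # (libcodec2.so.1.2) to the library name shipped in the Android bundle.
        subprocess.run(
            ["patchelf", "--replace-needed",
             "libcodec2.so.1.2", "libcodec2.so", so_path],
            check=True)

# Example (path taken from the Makefile target, shortened):
# fix_pycodec2_soname(".buildozer/android/platform/build-arm64-v8a_armeabi-v7a/dists/sideband")
```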
b/sbapp/assets/fonts/NotoSansHebrew-Bold.ttf index 04e5eb0..eb7b0d6 100644 Binary files a/sbapp/assets/fonts/NotoSansHebrew-Bold.ttf and b/sbapp/assets/fonts/NotoSansHebrew-Bold.ttf differ diff --git a/sbapp/assets/fonts/NotoSansHebrew-Medium.ttf b/sbapp/assets/fonts/NotoSansHebrew-Medium.ttf index a7a80a9..c21d600 100644 Binary files a/sbapp/assets/fonts/NotoSansHebrew-Medium.ttf and b/sbapp/assets/fonts/NotoSansHebrew-Medium.ttf differ diff --git a/sbapp/assets/fonts/NotoSansHebrew-Regular.ttf b/sbapp/assets/fonts/NotoSansHebrew-Regular.ttf index ca7a0fd..b353cb5 100644 Binary files a/sbapp/assets/fonts/NotoSansHebrew-Regular.ttf and b/sbapp/assets/fonts/NotoSansHebrew-Regular.ttf differ diff --git a/sbapp/assets/fonts/NotoSansJP-Regular.ttf b/sbapp/assets/fonts/NotoSansJP-Regular.ttf index 1583096..7c5dcac 100644 Binary files a/sbapp/assets/fonts/NotoSansJP-Regular.ttf and b/sbapp/assets/fonts/NotoSansJP-Regular.ttf differ diff --git a/sbapp/assets/fonts/NotoSansKR-Regular.ttf b/sbapp/assets/fonts/NotoSansKR-Regular.ttf index 1b14d32..9b4aa78 100644 Binary files a/sbapp/assets/fonts/NotoSansKR-Regular.ttf and b/sbapp/assets/fonts/NotoSansKR-Regular.ttf differ diff --git a/sbapp/assets/fonts/NotoSansSC-Regular.ttf b/sbapp/assets/fonts/NotoSansSC-Regular.ttf index 4d4cadb..91d4ad2 100644 Binary files a/sbapp/assets/fonts/NotoSansSC-Regular.ttf and b/sbapp/assets/fonts/NotoSansSC-Regular.ttf differ diff --git a/sbapp/assets/io.unsigned.sideband.desktop b/sbapp/assets/io.unsigned.sideband.desktop index a25f6fb..6a7d437 100644 --- a/sbapp/assets/io.unsigned.sideband.desktop +++ b/sbapp/assets/io.unsigned.sideband.desktop @@ -1,12 +1,13 @@ +# Entry version 20240630 [Desktop Entry] -Comment[en_US]= -Comment= +Comment[en_US]=Messaging, telemetry and remote control over LXMF +Comment=Messaging, telemetry and remote control over LXMF Encoding=UTF-8 Exec=sideband -GenericName[en_US]=LXMF messaging, telemetry and remote control -GenericName=LXMF messaging, telemetry and remote control +GenericName[en_US]=LXMF client +GenericName=LXMF client Icon=io.unsigned.sideband.png -Categories=InstantMessaging,Network +Categories=Utility MimeType= Name[en_US]=Sideband Name=Sideband diff --git a/sbapp/buildozer.spec b/sbapp/buildozer.spec index 1adf183..b1ad31f 100644 --- a/sbapp/buildozer.spec +++ b/sbapp/buildozer.spec @@ -6,31 +6,32 @@ package.domain = io.unsigned source.dir = . source.include_exts = py,png,jpg,jpeg,webp,ttf,kv,pyi,typed,so,0,1,2,3,atlas,frag,html,css,js,whl,zip,gz,woff2,pdf,epub,pgm source.include_patterns = assets/*,assets/fonts/*,share/* -source.exclude_patterns = app_storage/*,venv/*,Makefile,./Makefil*,requirements,precompiled/*,parked/*,./setup.py,Makef*,./Makefile,Makefile +source.exclude_patterns = app_storage/*,venv/*,Makefile,./Makefil*,requirements,precompiled/*,parked/*,./setup.py,Makef*,./Makefile,Makefile,bin/*,build/*,dist/*,__pycache__/* version.regex = __version__ = ['"](.*)['"] version.filename = %(source.dir)s/main.py -android.numeric_version = 20240522 +android.numeric_version = 20240911 -# Cryptography recipe is currently broken, using RNS-internal crypto for now. Since -# relevant PRs have now been merged in Kivy/P4A, the next release will hopefully allow -# building a non-ancient PyCa/Cryptography distribution again. When this happens, add -# the "cryptography" dependency back in here. 
-requirements = kivy==2.3.0,libbz2,pillow==10.2.0,qrcode==7.3.1,usb4a,usbserial4a,libwebp,android,able_recipe +requirements = kivy==2.3.0,libbz2,pillow==10.2.0,qrcode==7.3.1,usb4a,usbserial4a,libwebp,libogg,libopus,opusfile,numpy,cryptography,ffpyplayer,codec2,pycodec2,sh,pynacl,android,able_recipe -p4a.local_recipes = ../Others/python-for-android/pythonforandroid/recipes +android.gradle_dependencies = com.android.support:support-compat:28.0.0 +#android.enable_androidx = True +#android.add_aars = patches/support-compat-28.0.0.aar + +p4a.local_recipes = ../recipes/ icon.filename = %(source.dir)s/assets/icon.png presplash.filename = %(source.dir)s/assets/presplash_small.png android.presplash_color = #00000000 -# TODO: Fix +# TODO: Fix inability to set "user" orientation from spec +# This is currently handled by patching the APK manifest orientation = portrait fullscreen = 0 -android.permissions = INTERNET,POST_NOTIFICATIONS,WAKE_LOCK,FOREGROUND_SERVICE,CHANGE_WIFI_MULTICAST_STATE,BLUETOOTH, BLUETOOTH_ADMIN, BLUETOOTH_SCAN, BLUETOOTH_CONNECT, BLUETOOTH_ADVERTISE,ACCESS_NETWORK_STATE,ACCESS_FINE_LOCATION,ACCESS_COARSE_LOCATION,MANAGE_EXTERNAL_STORAGE,ACCESS_BACKGROUND_LOCATION +android.permissions = INTERNET,POST_NOTIFICATIONS,WAKE_LOCK,FOREGROUND_SERVICE,CHANGE_WIFI_MULTICAST_STATE,BLUETOOTH, BLUETOOTH_ADMIN, BLUETOOTH_SCAN, BLUETOOTH_CONNECT, BLUETOOTH_ADVERTISE,ACCESS_NETWORK_STATE,ACCESS_FINE_LOCATION,ACCESS_COARSE_LOCATION,MANAGE_EXTERNAL_STORAGE,ACCESS_BACKGROUND_LOCATION,RECORD_AUDIO -android.api = 30 +android.api = 31 android.minapi = 24 android.ndk = 25b android.skip_update = False @@ -44,7 +45,9 @@ android.add_gradle_repositories = flatDir { dirs("../../../../../../patches") } services = sidebandservice:services/sidebandservice.py:foreground android.whitelist = lib-dynload/termios.so android.manifest.intent_filters = patches/intent-filter.xml -android.add_aars = patches/support-compat-28.0.0.aar + +# android.add_libs_armeabi_v7a = ../libs/armeabi/*.so* +# android.add_libs_arm64_v8a = ../libs/arm64/*.so* [buildozer] log_level = 2 diff --git a/sbapp/freeze.py b/sbapp/freeze.py new file mode 100644 index 0000000..81f5ad8 --- /dev/null +++ b/sbapp/freeze.py @@ -0,0 +1,95 @@ +import os +import re +import setuptools +import cx_Freeze +from pathlib import Path + +build_appimage = True + +def get_version() -> str: + version_file = os.path.join( + os.path.dirname(__file__), "main.py" + ) + + version_file_data = open(version_file, "rt", encoding="utf-8").read() + version_regex = r"(?<=^__version__ = ['\"])[^'\"]+(?=['\"]$)" + try: + version = re.findall(version_regex, version_file_data, re.M)[0] + return version + except IndexError: + raise ValueError(f"Unable to find version string in {version_file}.") + +def get_variant() -> str: + version_file = os.path.join( + os.path.dirname(__file__), "main.py" + ) + + version_file_data = open(version_file, "rt", encoding="utf-8").read() + version_regex = r"(?<=^__variant__ = ['\"])[^'\"]+(?=['\"]$)" + try: + version = re.findall(version_regex, version_file_data, re.M)[0] + return version + except IndexError: + raise ValueError(f"Unable to find version string in {version_file}.") + +__version__ = get_version() +__variant__ = get_variant() + +def glob_paths(pattern): + out_files = [] + src_path = os.path.join(os.path.dirname(__file__), "kivymd") + + for root, dirs, files in os.walk(src_path): + for file in files: + if file.endswith(pattern): + filepath = os.path.join(str(Path(*Path(root).parts[1:])), file) + 
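The `get_version()` and `get_variant()` helpers in the new `freeze.py` above pull the version strings out of `main.py` with look-behind/look-ahead regexes. A quick standalone check of that pattern (the sample string is illustrative):

```python
import re

# Same pattern as get_version() in freeze.py
version_regex = r"(?<=^__version__ = ['\"])[^'\"]+(?=['\"]$)"
sample = '__debug_build__ = False\n__version__ = "0.9.5"\n__variant__ = "beta"\n'

# re.M makes ^ and $ match at line boundaries, so only the
# __version__ assignment line is captured.
print(re.findall(version_regex, sample, re.M))  # ['0.9.5']
```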
out_files.append(filepath.split(f"kivymd{os.sep}")[1]) + + return out_files + +package_data = { +"": [ + "assets/*", + "assets/fonts/*", + "assets/geoids/*", + "kivymd/fonts/*", + "kivymd/images/*", + "kivymd/*", + "mapview/icons/*", + *glob_paths(".kv") + ] +} + +print("Freezing Sideband "+__version__+" "+__variant__) + +if build_appimage: + global_excludes = [".buildozer", "build", "dist"] + # Dependencies are automatically detected, but they might need fine-tuning. + appimage_options = { + "target_name": "Sideband", + "target_version": __version__+" "+__variant__, + "include_files": [], + "excludes": [], + "packages": ["kivy"], + "zip_include_packages": [], + "bin_path_excludes": global_excludes, + } + + cx_Freeze.setup( + name="Sideband", + version=__version__, + author="Mark Qvist", + author_email="mark@unsigned.io", + url="https://unsigned.io/sideband", + executables=[ + cx_Freeze.Executable( + script="main.py", + base="console", + target_name="Sideband", + shortcut_name="Sideband", + icon="assets/icon.png", + copyright="Copyright (c) 2024 Mark Qvist", + ), + ], + options={"build_appimage": appimage_options}, + ) \ No newline at end of file diff --git a/sbapp/kivymd/uix/dialog/dialog.py b/sbapp/kivymd/uix/dialog/dialog.py index 9f2db53..73a5675 100755 --- a/sbapp/kivymd/uix/dialog/dialog.py +++ b/sbapp/kivymd/uix/dialog/dialog.py @@ -413,7 +413,7 @@ class MDDialog(BaseDialog): and defaults to `[]`. """ - width_offset = NumericProperty(dp(48)) + width_offset = NumericProperty(dp(32)) """ Dialog offset from device width. @@ -601,7 +601,7 @@ class MDDialog(BaseDialog): self.width = min(dp(560), Window.width - self.width_offset) elif self.size_hint == [1, 1] and DEVICE_TYPE == "mobile": self.size_hint = (None, None) - self.width = min(dp(280), Window.width - self.width_offset) + self.width = min(dp(560), Window.width - self.width_offset) if not self.title: self._spacer_top = 0 @@ -634,7 +634,7 @@ class MDDialog(BaseDialog): self.width = max( self.height + self.width_offset, min( - dp(560) if DEVICE_TYPE != "mobile" else dp(280), + dp(560) if DEVICE_TYPE != "mobile" else dp(560), Window.width - self.width_offset, ), ) diff --git a/sbapp/main.py b/sbapp/main.py index f7a5f60..f708dec 100644 --- a/sbapp/main.py +++ b/sbapp/main.py @@ -1,6 +1,6 @@ __debug_build__ = False __disable_shaders__ = False -__version__ = "0.8.5" +__version__ = "0.9.5" __variant__ = "beta" import sys @@ -9,6 +9,8 @@ parser = argparse.ArgumentParser(description="Sideband LXMF Client") parser.add_argument("-v", "--verbose", action='store_true', default=False, help="increase logging verbosity") parser.add_argument("-c", "--config", action='store', default=None, help="specify path of config directory") parser.add_argument("-d", "--daemon", action='store_true', default=False, help="run as a daemon, without user interface") +parser.add_argument("--export-settings", action='store', default=None, help="export application settings to file") +parser.add_argument("--import-settings", action='store', default=None, help="import application settings from file") parser.add_argument("--version", action="version", version="sideband {version}".format(version=__version__)) args = parser.parse_args() sys.argv = [sys.argv[0]] @@ -22,8 +24,75 @@ import base64 import threading import RNS.vendor.umsgpack as msgpack +if args.export_settings: + from .sideband.core import SidebandCore + sideband = SidebandCore( + None, + config_path=args.config, + is_client=False, + verbose=(args.verbose or __debug_build__), + is_daemon=True, + 
load_config_only=True, + ) + + sideband.version_str = "v"+__version__+" "+__variant__ + + import json + export = sideband.config.copy() + for k in export: + if isinstance(export[k], bytes): + export[k] = RNS.hexrep(export[k], delimit=False) + try: + export_path = os.path.expanduser(args.export_settings) + with open(export_path, "wb") as export_file: + export_file.write(json.dumps(export, indent=4).encode("utf-8")) + print(f"Application settings written to {export_path}") + exit(0) + + except Exception as e: + print(f"Could not write application settings to {export_path}. The contained exception was:\n{e}") + exit(1) + +elif args.import_settings: + from .sideband.core import SidebandCore + sideband = SidebandCore( + None, + config_path=args.config, + is_client=False, + verbose=(args.verbose or __debug_build__), + is_daemon=True, + load_config_only=True, + ) + + sideband.version_str = "v"+__version__+" "+__variant__ + + import json + addr_fields = ["lxmf_propagation_node", "last_lxmf_propagation_node", "nn_home_node", "telemetry_collector"] + try: + import_path = os.path.expanduser(args.import_settings) + imported = None + with open(import_path, "rb") as import_file: + json_data = import_file.read().decode("utf-8") + imported = json.loads(json_data) + for k in imported: + if k in addr_fields and imported[k] != None: + imported[k] = bytes.fromhex(imported[k]) + if len(imported[k]) != RNS.Reticulum.TRUNCATED_HASHLENGTH//8: + raise ValueError(f"Invalid hash length for {RNS.prettyhexrep(imported[k])}") + + if imported: + sideband.config = imported + sideband.save_configuration() + while sideband.saving_configuration: + time.sleep(0.1) + print(f"Application settings imported from {import_path}") + exit(0) + + except Exception as e: + print(f"Could not import application settings from {import_path}. 
The contained exception was:\n{e}") + exit(1) + if not args.daemon: - import plyer from kivy.logger import Logger, LOG_LEVELS from PIL import Image as PilImage import io @@ -74,12 +143,14 @@ if args.daemon: NewConv = DaemonElement; Telemetry = DaemonElement; ObjectDetails = DaemonElement; Announces = DaemonElement; Messages = DaemonElement; ts_format = DaemonElement; messages_screen_kv = DaemonElement; plyer = DaemonElement; multilingual_markup = DaemonElement; ContentNavigationDrawer = DaemonElement; DrawerList = DaemonElement; IconListItem = DaemonElement; escape_markup = DaemonElement; + SoundLoader = DaemonElement; else: from kivymd.app import MDApp app_superclass = MDApp from kivy.core.window import Window from kivy.core.clipboard import Clipboard + from kivy.core.audio import SoundLoader from kivy.base import EventLoop from kivy.clock import Clock from kivy.lang.builder import Builder @@ -103,8 +174,9 @@ else: import kivy.core.image kivy.core.image.Logger = redirect_log() - if RNS.vendor.platformutils.get_platform() == "android": + if RNS.vendor.platformutils.is_android(): from sideband.core import SidebandCore + import plyer from ui.layouts import * from ui.conversations import Conversations, MsgSync, NewConv @@ -113,7 +185,7 @@ else: from ui.announces import Announces from ui.messages import Messages, ts_format, messages_screen_kv from ui.helpers import ContentNavigationDrawer, DrawerList, IconListItem - from ui.helpers import multilingual_markup + from ui.helpers import multilingual_markup, mdc from kivymd.toast import toast from jnius import cast @@ -122,11 +194,15 @@ else: from android.permissions import request_permissions, check_permission from android.storage import primary_external_storage_path, secondary_external_storage_path + import pyogg + from pydub import AudioSegment + from kivymd.utils.set_bars_colors import set_bars_colors android_api_version = autoclass('android.os.Build$VERSION').SDK_INT else: from .sideband.core import SidebandCore + import sbapp.plyer as plyer from .ui.layouts import * from .ui.conversations import Conversations, MsgSync, NewConv @@ -135,7 +211,10 @@ else: from .ui.objectdetails import ObjectDetails from .ui.messages import Messages, ts_format, messages_screen_kv from .ui.helpers import ContentNavigationDrawer, DrawerList, IconListItem - from .ui.helpers import multilingual_markup + from .ui.helpers import multilingual_markup, mdc + + import sbapp.pyogg as pyogg + from sbapp.pydub import AudioSegment class toast: def __init__(self, *kwargs): @@ -193,12 +272,16 @@ class SidebandApp(MDApp): else: self.sideband = SidebandCore(self, config_path=self.config_path, is_client=False, verbose=(args.verbose or __debug_build__)) + self.sideband.version_str = "v"+__version__+" "+__variant__ + self.set_ui_theme() self.font_config() self.update_input_language() self.dark_theme_text_color = dark_theme_text_color self.conversations_view = None + self.include_conversations = True + self.include_objects = False self.messages_view = None self.map = None self.map_layer = None @@ -223,6 +306,14 @@ class SidebandApp(MDApp): self.attach_path = None self.attach_type = None + self.attach_dialog = None + self.rec_dialog = None + self.last_msg_audio = None + self.msg_sound = None + self.audio_msg_mode = LXMF.AM_OPUS_OGG + self.compat_error_dialog = None + self.rec_dialog_is_open = True + self.key_ptt_down = False Window.softinput_mode = "below_target" self.icon = self.sideband.asset_dir+"/icon.png" @@ -386,6 +477,9 @@ class SidebandApp(MDApp): 
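For the `--export-settings` / `--import-settings` handlers added above: byte-valued config entries such as `lxmf_propagation_node` and `telemetry_collector` are written to the JSON export as hex strings and converted back to bytes on import, with a length check against Reticulum's truncated hash size. A minimal round-trip sketch (the hash value is an example only):

```python
import RNS

destination_hash = bytes.fromhex("d6d4c1a0736447cfb1a1a9b1a3c4f5e6")  # example value

# Export side: bytes -> undelimited hex string, as written to the JSON file
exported = RNS.hexrep(destination_hash, delimit=False)

# Import side: hex string -> bytes, with the same sanity check as above
restored = bytes.fromhex(exported)
if len(restored) != RNS.Reticulum.TRUNCATED_HASHLENGTH // 8:
    raise ValueError("Invalid hash length")

assert restored == destination_hash
```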
LabelBase.register(name="emoji", fn_regular=fb_path+"NotoEmoji-Regular.ttf") + LabelBase.register(name="defaultinput", + fn_regular=fb_path+"DefaultInput.ttf") + LabelBase.register(name="combined", fn_regular=fb_path+"NotoSans-Regular.ttf", fn_bold=fb_path+"NotoSans-Bold.ttf", @@ -395,12 +489,20 @@ class SidebandApp(MDApp): def update_input_language(self): language = self.sideband.config["input_language"] if language == None: - self.input_font = "Roboto" - RNS.log("Setting input language to default set", RNS.LOG_DEBUG) + self.input_font = "defaultinput" else: self.input_font = language - RNS.log("Setting input language to "+str(language), RNS.LOG_DEBUG) + + RNS.log("Setting input language to "+str(self.input_font), RNS.LOG_DEBUG) + # def modify_input_font(self, ids): + # BIND_CLASSES = ["kivymd.uix.textfield.textfield.MDTextField",] + # for e in ids: + # te = ids[e] + # ts = str(te).split(" ")[0].replace("<", "") + # if ts in BIND_CLASSES: + # RNS.log("MODIFYING "+str(e)+" to "+self.input_font) + # te.font_name = self.input_font def update_ui_colors(self): if self.sideband.config["dark_ui"]: @@ -419,6 +521,7 @@ class SidebandApp(MDApp): self.color_hover = colors["Light"]["AppBar"] self.apply_eink_mods() + self.set_bars_colors() def update_ui_theme(self): if self.sideband.config["dark_ui"]: @@ -440,11 +543,54 @@ class SidebandApp(MDApp): def set_bars_colors(self): if RNS.vendor.platformutils.get_platform() == "android": - set_bars_colors( - self.theme_cls.primary_color, # status bar color - [0,0,0,0], # navigation bar color - "Light", # icons color of status bar - ) + + def set_navicons(set_dark_icons = False): + from android.runnable import run_on_ui_thread + from jnius import autoclass + WindowManager = autoclass("android.view.WindowManager$LayoutParams") + activity = autoclass("org.kivy.android.PythonActivity").mActivity + View = autoclass("android.view.View") + + def uit_exec(): + window = activity.getWindow() + window.clearFlags(WindowManager.FLAG_TRANSLUCENT_STATUS) + window.addFlags(WindowManager.FLAG_DRAWS_SYSTEM_BAR_BACKGROUNDS) + + if set_dark_icons: + window.getDecorView().setSystemUiVisibility(View.SYSTEM_UI_FLAG_LIGHT_NAVIGATION_BAR) + else: + window.getDecorView().setSystemUiVisibility(0) + + return run_on_ui_thread(uit_exec)() + + if self.sideband.config["dark_ui"]: + if self.sideband.config["eink_mode"] == True: + set_bars_colors( + self.theme_cls.primary_color, # status bar color + self.theme_cls.bg_light, # nav bar color + "Light", # icons color of status bar + ) + else: + set_bars_colors( + self.theme_cls.primary_color, + self.theme_cls.bg_darkest, + "Light") + else: + if self.sideband.config["eink_mode"] == True: + set_bars_colors( + self.theme_cls.primary_color, + self.theme_cls.bg_light, + "Light") + else: + set_bars_colors( + self.theme_cls.primary_color, + self.theme_cls.bg_darkest, + "Light") + + try: + set_navicons(set_dark_icons=True) + except Exception as e: + RNS.trace_exception(e) def close_any_action(self, sender=None): self.open_conversations(direction="right") @@ -598,6 +744,12 @@ class SidebandApp(MDApp): self.check_permissions() + def request_microphone_permission(self): + if RNS.vendor.platformutils.get_platform() == "android": + if not check_permission("android.permission.RECORD_AUDIO"): + RNS.log("Requesting microphone permission", RNS.LOG_DEBUG) + request_permissions(["android.permission.RECORD_AUDIO"]) + def check_storage_permission(self): storage_permissions_ok = False if android_api_version < 30: @@ -784,6 +936,12 @@ class SidebandApp(MDApp): if 
self.conversations_view != None: self.conversations_view.update() + if self.sideband.getstate("app.flags.new_ticket", allow_cache=True): + def cb(d): + self.sideband.message_router.reload_available_tickets() + self.sideband.setstate("app.flags.new_ticket", False) + Clock.schedule_once(cb, 1.5) + if self.sideband.getstate("wants.viewupdate.conversations", allow_cache=True): if self.conversations_view != None: self.conversations_view.update() @@ -838,6 +996,7 @@ class SidebandApp(MDApp): EventLoop.window.bind(on_keyboard=self.keyboard_event) EventLoop.window.bind(on_key_down=self.keydown_event) + EventLoop.window.bind(on_key_up=self.keyup_event) if __variant__ != "": variant_str = " "+__variant__ @@ -849,6 +1008,17 @@ class SidebandApp(MDApp): self.root.ids.nav_scrollview.effect_cls = ScrollEffect Clock.schedule_once(self.start_core, 0.25) + def keyup_event(self, instance, keyboard, keycode): + if self.keyboard_enabled: + if self.root.ids.screen_manager.current == "messages_screen": + if not self.rec_dialog_is_open: + if not self.messages_view.ids.message_text.focus: + if self.messages_view.ptt_enabled and keycode == 44: + if self.key_ptt_down: + self.key_ptt_down = False + self.message_ptt_up_action() + + def keydown_event(self, instance, keyboard, keycode, text, modifiers): if self.keyboard_enabled: if self.root.ids.screen_manager.current == "map_screen": @@ -882,6 +1052,38 @@ class SidebandApp(MDApp): self.messages_view.ids.message_text.write_tab = True Clock.schedule_once(tab_job, 0.15) + elif self.rec_dialog != None and self.rec_dialog_is_open: + if text == " ": + self.msg_rec_a_rec(None) + elif keycode == 40: + self.msg_rec_a_save(None) + + elif not self.rec_dialog_is_open and not self.messages_view.ids.message_text.focus and self.messages_view.ptt_enabled and keycode == 44: + if not self.key_ptt_down: + self.key_ptt_down = True + self.message_ptt_down_action() + + elif len(modifiers) > 1 and "shift" in modifiers and "ctrl" in modifiers: + def clear_att(): + if self.attach_path != None: + self.attach_path = None + self.attach_type = None + self.update_message_widgets() + if text == "a": + clear_att(); self.message_attachment_action(None) + if text == "i": + clear_att(); self.message_attach_action(attach_type="defimg") + if text == "f": + clear_att(); self.message_attach_action(attach_type="file") + if text == "v": + clear_att() + self.audio_msg_mode = LXMF.AM_OPUS_OGG + self.message_attach_action(attach_type="audio") + if text == "c": + clear_att() + self.audio_msg_mode = LXMF.AM_CODEC2_2400 + self.message_attach_action(attach_type="audio") + if len(modifiers) > 0: if modifiers[0] == "ctrl": if text == "q": @@ -889,7 +1091,10 @@ class SidebandApp(MDApp): if text == "w": if self.root.ids.screen_manager.current == "conversations_screen": - self.quit_action(self) + if self.include_conversations and not self.include_objects: + self.quit_action(self) + else: + self.conversations_action(direction="right") elif self.root.ids.screen_manager.current == "map_settings_screen": self.close_sub_map_action() elif self.root.ids.screen_manager.current == "object_details_screen": @@ -937,13 +1142,18 @@ class SidebandApp(MDApp): else: self.telemetry_action(self) - if text == "o": - # if self.root.ids.screen_manager.current == "telemetry_screen": + if text == "u": self.map_display_own_telemetry() + if text == "o": + self.objects_action() + if text == "r": if self.root.ids.screen_manager.current == "conversations_screen": - self.lxmf_sync_action(self) + if self.include_objects: + 
self.conversations_action(self, direction="right") + else: + self.lxmf_sync_action(self) elif self.root.ids.screen_manager.current == "telemetry_screen": self.conversations_action(self, direction="right") elif self.root.ids.screen_manager.current == "object_details_screen": @@ -967,10 +1177,13 @@ class SidebandApp(MDApp): # Handle escape/back if key == 27: if self.root.ids.screen_manager.current == "conversations_screen": - if time.time() - self.last_exit_event < 2: - self.quit_action(self) + if not self.include_conversations and self.include_objects: + self.conversations_action(direction="right") else: - self.last_exit_event = time.time() + if time.time() - self.last_exit_event < 2: + self.quit_action(self) + else: + self.last_exit_event = time.time() else: if self.root.ids.screen_manager.current == "hardware_rnode_screen": @@ -1112,12 +1325,18 @@ class SidebandApp(MDApp): Clock.schedule_once(cbu, 0.15+0.25) def open_conversation(self, context_dest, direction="left"): + self.rec_dialog_is_open = False self.outbound_mode_paper = False self.outbound_mode_command = False - if self.sideband.config["propagation_by_default"]: - self.outbound_mode_propagation = True + self.outbound_mode_propagation = False + if self.include_objects and not self.include_conversations: + if self.sideband.config["propagation_by_default"]: + self.outbound_mode_propagation = True + else: + self.outbound_mode_command = True else: - self.outbound_mode_propagation = False + if self.sideband.config["propagation_by_default"]: + self.outbound_mode_propagation = True self.root.ids.screen_manager.transition.direction = direction self.messages_view = Messages(self, context_dest) @@ -1153,11 +1372,17 @@ class SidebandApp(MDApp): Clock.schedule_once(scb, 0.33) def close_messages_action(self, sender=None): + self.rec_dialog_is_open = False self.open_conversations(direction="right") def message_send_action(self, sender=None): + self.rec_dialog_is_open = False if self.messages_view.ids.message_text.text == "": - return + if not (self.attach_type != None and self.attach_path != None): + return + + if self.outbound_mode_command: + return def cb(dt): self.message_send_dispatch(sender) @@ -1165,6 +1390,10 @@ class SidebandApp(MDApp): def message_send_dispatch(self, sender=None): self.messages_view.ids.message_send_button.disabled = True + def cb(dt): + self.messages_view.ids.message_send_button.disabled = False + Clock.schedule_once(cb, 0.5) + if self.root.ids.screen_manager.current == "messages_screen": if self.outbound_mode_propagation and self.sideband.message_router.get_outbound_propagation_node() == None: self.messages_view.send_error_dialog = MDDialog( @@ -1183,10 +1412,14 @@ class SidebandApp(MDApp): else: msg_content = self.messages_view.ids.message_text.text + if msg_content == "": + msg_content = " " + context_dest = self.messages_view.ids.messages_scrollview.active_conversation attachment = None image = None + audio = None if not self.outbound_mode_command and not self.outbound_mode_paper: if self.attach_type != None and self.attach_path != None: try: @@ -1197,6 +1430,14 @@ class SidebandApp(MDApp): with open(self.attach_path, "rb") as af: attachment = [fbn, af.read()] + if self.attach_type == "audio": + if self.audio_msg_mode == LXMF.AM_OPUS_OGG: + with open(self.attach_path, "rb") as af: + audio = [self.audio_msg_mode, af.read()] + elif self.audio_msg_mode >= LXMF.AM_CODEC2_700C and self.audio_msg_mode <= LXMF.AM_CODEC2_3200: + with open(self.attach_path, "rb") as af: + audio = [self.audio_msg_mode, af.read()] + elif 
self.attach_type == "lbimg": max_size = 320, 320 with PilImage.open(self.attach_path) as im: @@ -1268,7 +1509,7 @@ class SidebandApp(MDApp): self.messages_view.ids.messages_scrollview.scroll_y = 0 self.jobs(0) - elif self.sideband.send_message(msg_content, context_dest, self.outbound_mode_propagation, attachment = attachment, image = image): + elif self.sideband.send_message(msg_content, context_dest, self.outbound_mode_propagation, attachment = attachment, image = image, audio = audio): self.messages_view.ids.message_text.text = "" self.messages_view.ids.messages_scrollview.scroll_y = 0 self.jobs(0) @@ -1286,10 +1527,6 @@ class SidebandApp(MDApp): ], ) self.messages_view.send_error_dialog.open() - - def cb(dt): - self.messages_view.ids.message_send_button.disabled = False - Clock.schedule_once(cb, 0.5) def peer_show_location_action(self, sender): if self.root.ids.screen_manager.current == "messages_screen": @@ -1414,46 +1651,431 @@ class SidebandApp(MDApp): ok_button.bind(on_release=ate_dialog.dismiss) ate_dialog.open() - def message_attach_action(self, attach_type=None): + def display_codec2_error(self): + if self.compat_error_dialog == None: + def cb(sender): + self.compat_error_dialog.dismiss() + self.compat_error_dialog = MDDialog( + title="Could not load Codec2", + text="The Codec2 library could not be loaded. This likely means that you do not have the [b]codec2[/b] package or shared library installed on your system.\n\nThis library is normally installed automatically when Sideband is installed, but on some systems, this is not possible.\n\nTry installing it with a command such as [b]pamac install codec2[/b] or [b]apt install codec2[/b], or by compiling it from source for this system.", + buttons=[ + MDRectangleFlatButton( + text="OK", + font_size=dp(18), + on_release=cb + ) + ], + ) + self.compat_error_dialog.open() + + def play_audio_field(self, audio_field): + if RNS.vendor.platformutils.is_darwin(): + if self.compat_error_dialog == None: + def cb(sender): + self.compat_error_dialog.dismiss() + self.compat_error_dialog = MDDialog( + title="Unsupported Feature on macOS", + text="Audio message functionality is currently only implemented on Linux and Android. Please support the development if you need this feature on macOS.", + buttons=[ + MDRectangleFlatButton( + text="OK", + font_size=dp(18), + on_release=cb + ) + ], + ) + self.compat_error_dialog.open() + return + elif RNS.vendor.platformutils.is_windows(): + if self.compat_error_dialog == None: + def cb(sender): + self.compat_error_dialog.dismiss() + self.compat_error_dialog = MDDialog( + title="Unsupported Feature on Windows", + text="Audio message functionality is currently only implemented on Linux and Android. 
Please support the development if you need this feature on Windows.", + buttons=[ + MDRectangleFlatButton( + text="OK", + font_size=dp(18), + on_release=cb + ) + ], + ) + self.compat_error_dialog.open() + return + else: + try: + temp_path = None + if self.last_msg_audio != audio_field[1]: + RNS.log("Reloading audio source", RNS.LOG_DEBUG) + if len(audio_field[1]) > 10: + self.last_msg_audio = audio_field[1] + else: + self.last_msg_audio = None + return + + if audio_field[0] == LXMF.AM_OPUS_OGG: + temp_path = self.sideband.rec_cache+"/msg.ogg" + with open(temp_path, "wb") as af: + af.write(self.last_msg_audio) + + elif audio_field[0] >= LXMF.AM_CODEC2_700C and audio_field[0] <= LXMF.AM_CODEC2_3200: + temp_path = self.sideband.rec_cache+"/msg.ogg" + from sideband.audioproc import samples_to_ogg, decode_codec2, detect_codec2 + + target_rate = 8000 + if RNS.vendor.platformutils.is_linux(): + target_rate = 48000 + + if detect_codec2(): + if samples_to_ogg(decode_codec2(audio_field[1], audio_field[0]), temp_path, input_rate=8000, output_rate=target_rate): + RNS.log("Wrote OGG file to: "+temp_path, RNS.LOG_DEBUG) + else: + RNS.log("OGG write failed", RNS.LOG_DEBUG) + else: + self.last_msg_audio = None + self.display_codec2_error() + return + + else: + raise NotImplementedError(audio_field[0]) + + if self.msg_sound == None: + if RNS.vendor.platformutils.is_android(): + from plyer import audio + self.request_microphone_permission() + else: + from sbapp.plyer import audio + + self.msg_sound = audio + + self.msg_sound._file_path = temp_path + self.msg_sound.reload() + + if self.msg_sound != None and self.msg_sound.playing(): + RNS.log("Stopping playback", RNS.LOG_DEBUG) + self.msg_sound.stop() + else: + if self.msg_sound != None: + RNS.log("Starting playback", RNS.LOG_DEBUG) + self.msg_sound.play() + else: + RNS.log("Playback was requested, but no audio data was loaded for playback", RNS.LOG_ERROR) + + except Exception as e: + RNS.log("Error while playing message audio:"+str(e)) + RNS.trace_exception(e) + + def message_ptt_down_action(self, sender=None): + if self.sideband.ui_recording: + return + + self.sideband.ui_started_recording() + self.audio_msg_mode = LXMF.AM_CODEC2_2400 + self.message_attach_action(attach_type="audio", nodialog=True) + if self.rec_dialog == None: + self.message_init_rec_dialog() + self.rec_dialog.recording = True + el_button = self.messages_view.ids.message_ptt_button + el_icon = self.messages_view.ids.message_ptt_button.children[0].children[1] + el_button.theme_text_color="Custom" + el_button.text_color=mdc("Orange","400") + el_button.line_color=mdc("Orange","400") + el_icon.theme_text_color="Custom" + el_icon.text_color=mdc("Orange","400") + def cb(dt): + self.msg_audio.start() + Clock.schedule_once(cb, 0.15) + + + def message_ptt_up_action(self, sender=None): + if not self.sideband.ui_recording: + return + + self.rec_dialog.recording = False + el_button = self.messages_view.ids.message_ptt_button + el_icon = self.messages_view.ids.message_ptt_button.children[0].children[1] + el_button.theme_text_color="Custom" + el_button.text_color=mdc("BlueGray","500") + el_button.line_color=mdc("BlueGray","500") + el_icon.theme_text_color="Custom" + el_icon.text_color=mdc("BlueGray","500") + def cb_s(dt): + try: + self.msg_audio.stop() + except Exception as e: + RNS.log("An error occurred while stopping recording: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) + + self.sideband.ui_stopped_recording() + if self.message_process_audio(): + self.message_send_action() + 
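The low-bandwidth voice path implemented by `message_process_audio`, defined just below, boils down to: decode the recorded OPUS/OGG file, downmix to mono, normalize, resample to 8 kHz / 16-bit, and hand the raw samples to the codec2 encoder. A condensed sketch of those steps, assuming the `sideband.audioproc` helpers are importable and using an illustrative file path:

```python
import pyogg
import LXMF
from pydub import AudioSegment
from sideband.audioproc import encode_codec2

opus_file = pyogg.OpusFile("recording.ogg")          # illustrative path
segment = AudioSegment(
    bytes(opus_file.as_array()),
    frame_rate=opus_file.frequency,
    sample_width=opus_file.bytes_per_sample,
    channels=opus_file.channels,
)
segment = segment.split_to_mono()[0]                 # codec2 input is mono
segment = segment.apply_gain(-segment.max_dBFS)      # normalize peak level
segment = segment.set_frame_rate(8000)               # codec2 expects 8 kHz
segment = segment.set_sample_width(2)                # 16-bit signed samples
samples = segment.get_array_of_samples()

encoded = encode_codec2(samples, LXMF.AM_CODEC2_2400)  # low-bandwidth voice mode
```

The resulting bytes are what ends up in the `[mode, data]` audio field passed to `send_message()`.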
Clock.schedule_once(cb_s, 0.35) + + def message_process_audio(self): + if self.audio_msg_mode == LXMF.AM_OPUS_OGG: + from sideband.audioproc import voice_processing + proc_path = voice_processing(self.msg_audio._file_path) + if proc_path: + self.attach_path = proc_path + os.unlink(self.msg_audio._file_path) + RNS.log("Using voice-processed OPUS data in OGG container", RNS.LOG_DEBUG) + else: + self.attach_path = self.msg_audio._file_path + RNS.log("Using unmodified OPUS data in OGG container", RNS.LOG_DEBUG) + else: + ap_start = time.time() + from sideband.audioproc import voice_processing + proc_path = voice_processing(self.msg_audio._file_path) + + if proc_path: + opus_file = pyogg.OpusFile(proc_path) + RNS.log("Using voice-processed audio for codec2 encoding", RNS.LOG_DEBUG) + else: + opus_file = pyogg.OpusFile(self.msg_audio._file_path) + RNS.log("Using unprocessed audio data for codec2 encoding", RNS.LOG_DEBUG) + + RNS.log(f"OPUS LOAD {opus_file.frequency}Hz {opus_file.bytes_per_sample*8}bit {opus_file.channels}ch") + + audio = AudioSegment( + bytes(opus_file.as_array()), + frame_rate=opus_file.frequency, + sample_width=opus_file.bytes_per_sample, + channels=opus_file.channels, + ) + audio = audio.split_to_mono()[0] + audio = audio.apply_gain(-audio.max_dBFS) + + if self.audio_msg_mode >= LXMF.AM_CODEC2_700C and self.audio_msg_mode <= LXMF.AM_CODEC2_3200: + audio = audio.set_frame_rate(8000) + audio = audio.set_sample_width(2) + samples = audio.get_array_of_samples() + + from sideband.audioproc import encode_codec2, detect_codec2 + if detect_codec2(): + encoded = encode_codec2(samples, self.audio_msg_mode) + + ap_duration = time.time() - ap_start + RNS.log("Audio processing complete in "+RNS.prettytime(ap_duration), RNS.LOG_DEBUG) + + export_path = self.sideband.rec_cache+"/recording.enc" + with open(export_path, "wb") as export_file: + export_file.write(encoded) + self.attach_path = export_path + os.unlink(self.msg_audio._file_path) + else: + self.display_codec2_error() + return False + + return True + + def message_init_rec_dialog(self): + ss = int(dp(18)) + if RNS.vendor.platformutils.is_android(): + from plyer import audio + self.request_microphone_permission() + else: + from sbapp.plyer import audio + + self.msg_audio = audio + self.msg_audio._file_path = self.sideband.rec_cache+"/recording.ogg" + + def a_rec_action(sender): + if not self.rec_dialog.recording: + self.sideband.ui_started_recording() + RNS.log("Starting recording...") # TODO: Remove + self.rec_dialog.recording = True + el = self.rec_dialog.rec_item.children[0].children[0] + el.ttc = el.theme_text_color; el.tc = el.text_color + el.theme_text_color="Custom" + el.text_color=mdc("Red","400") + el.icon = "stop-circle" + self.rec_dialog.rec_item.text = "[size="+str(ss)+"]Stop Recording[/size]" + def cb(dt): + self.msg_audio.start() + Clock.schedule_once(cb, 0.10) + + else: + self.sideband.ui_stopped_recording() + RNS.log("Stopping recording...") # TODO: Remove + self.rec_dialog.recording = False + self.rec_dialog.rec_item.text = "[size="+str(ss)+"]Start Recording[/size]" + el = self.rec_dialog.rec_item.children[0].children[0] + el.icon = "record" + el.text_color = self.theme_cls._get_text_color() + self.rec_dialog.play_item.disabled = False + self.rec_dialog.save_item.disabled = False + self.msg_audio.stop() + + self.msg_rec_a_rec = a_rec_action + + def a_play(sender): + if self.rec_dialog.recording: + a_rec_action(sender) + + if not self.rec_dialog.playing: + RNS.log("Playing recording...", RNS.LOG_DEBUG) + 
self.rec_dialog.playing = True + self.rec_dialog.play_item.children[0].children[0].icon = "stop" + self.rec_dialog.play_item.text = "[size="+str(ss)+"]Stop[/size]" + self.msg_audio.play() + else: + RNS.log("Stopping playback...", RNS.LOG_DEBUG) + self.rec_dialog.playing = False + self.rec_dialog.play_item.children[0].children[0].icon = "play" + self.rec_dialog.play_item.text = "[size="+str(ss)+"]Play[/size]" + self.msg_audio.stop() + + self.msg_rec_a_play = a_play + + def a_finished(sender): + RNS.log("Playback finished", RNS.LOG_DEBUG) + self.rec_dialog.playing = False + self.rec_dialog.play_item.children[0].children[0].icon = "play" + self.rec_dialog.play_item.text = "[size="+str(ss)+"]Play[/size]" + + self.msg_audio._finished_callback = a_finished + + def a_save(sender): + if self.rec_dialog.recording: + a_rec_action(sender) + self.rec_dialog_is_open = False + self.rec_dialog.dismiss() + + try: + if self.audio_msg_mode == LXMF.AM_OPUS_OGG: + from sideband.audioproc import voice_processing + proc_path = voice_processing(self.msg_audio._file_path) + if proc_path: + self.attach_path = proc_path + os.unlink(self.msg_audio._file_path) + RNS.log("Using voice-processed OPUS data in OGG container", RNS.LOG_DEBUG) + else: + self.attach_path = self.msg_audio._file_path + RNS.log("Using unmodified OPUS data in OGG container", RNS.LOG_DEBUG) + else: + self.message_process_audio() + + self.update_message_widgets() + toast("Added recorded audio to message") + + except Exception as e: + RNS.trace_exception(e) + + self.msg_rec_a_save = a_save + + cancel_button = MDRectangleFlatButton(text="Cancel", font_size=dp(18)) + rec_item = DialogItem(IconLeftWidget(icon="record", on_release=a_rec_action), text="[size="+str(ss)+"]Start Recording[/size]", on_release=a_rec_action) + play_item = DialogItem(IconLeftWidget(icon="play", on_release=a_play), text="[size="+str(ss)+"]Play[/size]", on_release=a_play, disabled=True) + save_item = DialogItem(IconLeftWidget(icon="content-save-move-outline", on_release=a_save), text="[size="+str(ss)+"]Save to message[/size]", on_release=a_save, disabled=True) + self.rec_dialog = MDDialog( + title="Record Audio", + type="simple", + # text="Test\n", + items=[ + rec_item, + play_item, + save_item, + ], + buttons=[ cancel_button ], + width_offset=dp(32), + ) + cancel_button.bind(on_release=self.rec_dialog.dismiss) + self.rec_dialog.recording = False + self.rec_dialog.playing = False + self.rec_dialog.rec_item = rec_item + self.rec_dialog.play_item = play_item + self.rec_dialog.save_item = save_item + + def message_record_audio_action(self): + ss = int(dp(18)) + if self.rec_dialog == None: + self.message_init_rec_dialog() + + else: + self.rec_dialog.play_item.disabled = True + self.rec_dialog.save_item.disabled = True + self.rec_dialog.recording = False + self.rec_dialog.rec_item.text = "[size="+str(ss)+"]Start Recording[/size]" + self.rec_dialog.rec_item.children[0].children[0].icon = "record" + + self.rec_dialog.open() + self.rec_dialog_is_open = True + + def message_attach_action(self, attach_type=None, nodialog=False): + file_attach_types = ["lbimg", "defimg", "hqimg", "file"] + rec_attach_types = ["audio"] + self.attach_path = None - self.attach_type = attach_type - self.message_select_file_action() + self.rec_dialog_is_open = False + if attach_type in file_attach_types: + self.attach_type = attach_type + if not nodialog: + self.message_select_file_action() + elif attach_type in rec_attach_types: + self.attach_type = attach_type + if not nodialog: + 
self.message_record_audio_action() def message_attachment_action(self, sender): + self.rec_dialog_is_open = False if self.attach_path == None: - attach_dialog = None def a_img_lb(sender): - attach_dialog.dismiss() + self.attach_dialog.dismiss() self.message_attach_action(attach_type="lbimg") def a_img_def(sender): - attach_dialog.dismiss() + self.attach_dialog.dismiss() self.message_attach_action(attach_type="defimg") def a_img_hq(sender): - attach_dialog.dismiss() + self.attach_dialog.dismiss() self.message_attach_action(attach_type="hqimg") def a_file(sender): - attach_dialog.dismiss() + self.attach_dialog.dismiss() self.message_attach_action(attach_type="file") + def a_audio_hq(sender): + self.attach_dialog.dismiss() + self.audio_msg_mode = LXMF.AM_OPUS_OGG + self.message_attach_action(attach_type="audio") + def a_audio_lb(sender): + self.attach_dialog.dismiss() + self.audio_msg_mode = LXMF.AM_CODEC2_2400 + self.message_attach_action(attach_type="audio") - ss = int(dp(18)) - cancel_button = MDRectangleFlatButton(text="Cancel", font_size=dp(18)) - attach_dialog = MDDialog( - title="Add Attachment", - type="simple", - text="Select the type of attachment you want to send with this message\n", - items=[ - DialogItem(IconLeftWidget(icon="message-image-outline"), text="[size="+str(ss)+"]Low-bandwidth Image[/size]", on_release=a_img_lb), - DialogItem(IconLeftWidget(icon="file-image"), text="[size="+str(ss)+"]Medium Image[/size]", on_release=a_img_def), - DialogItem(IconLeftWidget(icon="image-outline"), text="[size="+str(ss)+"]High-res Image[/size]", on_release=a_img_hq), - DialogItem(IconLeftWidget(icon="file-outline"), text="[size="+str(ss)+"]File Attachment[/size]", on_release=a_file), - ], - buttons=[ cancel_button ], - width_offset=dp(12), - ) + if self.attach_dialog == None: + ss = int(dp(18)) + cancel_button = MDRectangleFlatButton(text="Cancel", font_size=dp(18)) + ad_items = [ + DialogItem(IconLeftWidget(icon="message-image-outline", on_release=a_img_lb), text="[size="+str(ss)+"]Low-bandwidth Image[/size]", on_release=a_img_lb), + DialogItem(IconLeftWidget(icon="file-image", on_release=a_img_def), text="[size="+str(ss)+"]Medium Image[/size]", on_release=a_img_def), + DialogItem(IconLeftWidget(icon="image-outline", on_release=a_img_hq), text="[size="+str(ss)+"]High-res Image[/size]", on_release=a_img_hq), + DialogItem(IconLeftWidget(icon="account-voice", on_release=a_audio_lb), text="[size="+str(ss)+"]Low-bandwidth Voice[/size]", on_release=a_audio_lb), + DialogItem(IconLeftWidget(icon="microphone-message", on_release=a_audio_hq), text="[size="+str(ss)+"]High-quality Voice[/size]", on_release=a_audio_hq), + DialogItem(IconLeftWidget(icon="file-outline", on_release=a_file), text="[size="+str(ss)+"]File Attachment[/size]", on_release=a_file)] + + if RNS.vendor.platformutils.is_windows(): + ad_items.pop(3) + ad_items.pop(3) - cancel_button.bind(on_release=attach_dialog.dismiss) - attach_dialog.open() - attach_dialog.update_width() + if RNS.vendor.platformutils.is_darwin(): + ad_items.pop(3) + ad_items.pop(3) + + if RNS.vendor.platformutils.is_android() and android_api_version < 29: + ad_items.pop(3) + ad_items.pop(3) + + self.attach_dialog = MDDialog( + title="Add Attachment", + type="simple", + text="Select the type of attachment you want to send with this message\n", + items=ad_items, + buttons=[ cancel_button ], + width_offset=dp(32), + ) + + cancel_button.bind(on_release=self.attach_dialog.dismiss) + + self.attach_dialog.open() else: self.attach_path = None @@ -1520,12 +2142,36 @@ 
class SidebandApp(MDApp): keys_str = "The crytographic keys for the destination address are unknown at this time. You can wait for an announce to arrive, or query the network for the necessary keys." self.messages_view.ids.nokeys_text.text = keys_str self.widget_hide(self.messages_view.ids.message_input_part, True) + self.widget_hide(self.messages_view.ids.message_ptt, True) self.widget_hide(self.messages_view.ids.no_keys_part, False) ### Conversations screen ###################################### def conversations_action(self, sender=None, direction="left", no_transition=False): + self.rec_dialog_is_open = False + if self.include_objects: + self.include_conversations = True + self.include_objects = False + self.conversations_view.update() + + if no_transition: + self.root.ids.screen_manager.transition = self.no_transition + else: + self.root.ids.screen_manager.transition = self.slide_transition + self.root.ids.screen_manager.transition.direction = direction + + self.open_conversations(direction=direction) + + if no_transition: + self.root.ids.screen_manager.transition = self.slide_transition + + def objects_action(self, sender=None, direction="left", no_transition=False): + if self.include_conversations: + self.include_conversations = False + self.include_objects = True + self.conversations_view.update() + if no_transition: self.root.ids.screen_manager.transition = self.no_transition else: @@ -1813,7 +2459,12 @@ class SidebandApp(MDApp): self.information_screen.ids.information_scrollview.effect_cls = ScrollEffect self.information_screen.ids.information_logo.icon = self.sideband.asset_dir+"/rns_256.png" - info = "This is "+self.root.ids.app_version_info.text+", on RNS v"+RNS.__version__+" and LXMF v"+LXMF.__version__+".\n\nHumbly build using the following open components:\n\n - [b]Reticulum[/b] (MIT License)\n - [b]LXMF[/b] (MIT License)\n - [b]KivyMD[/b] (MIT License)\n - [b]Kivy[/b] (MIT License)\n - [b]GeoidHeight[/b] (LGPL License)\n - [b]Python[/b] (PSF License)"+"\n\nGo to [u][ref=link]https://unsigned.io/donate[/ref][/u] to support the project.\n\nThe Sideband app is Copyright (c) 2024 Mark Qvist / unsigned.io\n\nPermission is granted to freely share and distribute binary copies of Sideband v"+__version__+" "+__variant__+", so long as no payment or compensation is charged for said distribution or sharing.\n\nIf you were charged or paid anything for this copy of Sideband, please report it to [b]license@unsigned.io[/b].\n\nTHIS IS EXPERIMENTAL SOFTWARE - SIDEBAND COMES WITH ABSOLUTELY NO WARRANTY - USE AT YOUR OWN RISK AND RESPONSIBILITY" + str_comps = " - [b]Reticulum[/b] (MIT License)\n - [b]LXMF[/b] (MIT License)\n - [b]KivyMD[/b] (MIT License)" + str_comps += "\n - [b]Kivy[/b] (MIT License)\n - [b]Codec2[/b] (LGPL License)\n - [b]PyCodec2[/b] (BSD-3 License)" + str_comps += "\n - [b]PyDub[/b] (MIT License)\n - [b]PyOgg[/b] (Public Domain)" + str_comps += "\n - [b]GeoidHeight[/b] (LGPL License)\n - [b]Python[/b] (PSF License)" + str_comps += "\n\nGo to [u][ref=link]https://unsigned.io/donate[/ref][/u] to support the project.\n\nThe Sideband app is Copyright (c) 2024 Mark Qvist / unsigned.io\n\nPermission is granted to freely share and distribute binary copies of Sideband v"+__version__+" "+__variant__+", so long as no payment or compensation is charged for said distribution or sharing.\n\nIf you were charged or paid anything for this copy of Sideband, please report it to [b]license@unsigned.io[/b].\n\nTHIS IS EXPERIMENTAL SOFTWARE - SIDEBAND COMES WITH ABSOLUTELY NO WARRANTY - USE 
AT YOUR OWN RISK AND RESPONSIBILITY" + info = "This is "+self.root.ids.app_version_info.text+", on RNS v"+RNS.__version__+" and LXMF v"+LXMF.__version__+".\n\nHumbly build using the following open components:\n\n"+str_comps self.information_screen.ids.information_info.text = info self.information_screen.ids.information_info.bind(on_ref_press=link_exec) @@ -1972,11 +2623,9 @@ class SidebandApp(MDApp): if sender != self.settings_screen.ids.settings_lang_hebrew: self.settings_screen.ids.settings_lang_hebrew.active = False - RNS.log("Sender: "+str(sender)) - if self.settings_screen.ids.settings_lang_default.active: self.sideband.config["input_language"] = None - self.settings_screen.ids.settings_display_name.font_name = "" + self.settings_screen.ids.settings_display_name.font_name = "defaultinput" elif self.settings_screen.ids.settings_lang_chinese.active: self.sideband.config["input_language"] = "chinese" self.settings_screen.ids.settings_display_name.font_name = "chinese" @@ -1994,7 +2643,7 @@ class SidebandApp(MDApp): self.settings_screen.ids.settings_display_name.font_name = "hebrew" else: self.sideband.config["input_language"] = None - self.settings_screen.ids.settings_display_name.font_name = "" + self.settings_screen.ids.settings_display_name.font_name = "defaultinput" self.sideband.save_configuration() @@ -2015,6 +2664,11 @@ class SidebandApp(MDApp): self.sideband.save_configuration() self.sideband.setstate("wants.viewupdate.conversations", True) + def save_display_style_from_trusted_only(sender=None, event=None): + self.sideband.config["display_style_from_all"] = not self.settings_screen.ids.display_style_from_trusted_only.active + self.sideband.save_configuration() + self.sideband.setstate("wants.viewupdate.conversations", True) + def save_advanced_stats(sender=None, event=None): self.sideband.config["advanced_stats"] = self.settings_screen.ids.settings_advanced_statistics.active self.sideband.save_configuration() @@ -2039,6 +2693,11 @@ class SidebandApp(MDApp): self.sideband.config["lxmf_ignore_unknown"] = self.settings_screen.ids.settings_lxmf_ignore_unknown.active self.sideband.save_configuration() + def save_lxmf_ignore_invalid_stamps(sender=None, event=None): + self.sideband.config["lxmf_ignore_invalid_stamps"] = self.settings_screen.ids.settings_ignore_invalid_stamps.active + self.sideband.save_configuration() + self.sideband.update_ignore_invalid_stamps() + def save_lxmf_sync_limit(sender=None, event=None): self.sideband.config["lxmf_sync_limit"] = self.settings_screen.ids.settings_lxmf_sync_limit.active self.sideband.save_configuration() @@ -2064,6 +2723,16 @@ class SidebandApp(MDApp): self.settings_screen.ids.settings_print_command.text = self.sideband.config["print_command"] self.sideband.save_configuration() + def save_lxmf_stamp_cost(sender=None, event=None, save=True): + if self.settings_screen.ids.settings_lxmf_require_stamps.active: + self.widget_hide(self.settings_screen.ids.lxmf_costslider_container, False) + else: + self.widget_hide(self.settings_screen.ids.lxmf_costslider_container, True) + + if save: + self.sideband.config["lxmf_require_stamps"] = self.settings_screen.ids.settings_lxmf_require_stamps.active + self.sideband.save_configuration() + def save_lxmf_periodic_sync(sender=None, event=None, save=True): if self.settings_screen.ids.settings_lxmf_periodic_sync.active: self.widget_hide(self.settings_screen.ids.lxmf_syncslider_container, False) @@ -2094,6 +2763,19 @@ class SidebandApp(MDApp): self.sideband.config["lxmf_sync_interval"] = interval 
self.sideband.save_configuration() + def stamp_cost_change(sender=None, event=None, save=True): + slider_val = int(self.settings_screen.ids.settings_lxmf_require_stamps_cost.value) + cost_text = str(slider_val) + + self.settings_screen.ids.settings_lxmf_require_stamps_label.text = f"Require stamp cost {cost_text} for incoming messages" + if save: + if slider_val > 32: + slider_val = 32 + if slider_val < 1: + slider_val = 1 + self.sideband.config["lxmf_inbound_stamp_cost"] = slider_val + self.sideband.save_configuration() + self.settings_screen.ids.settings_lxmf_address.text = RNS.hexrep(self.sideband.lxmf_destination.hash, delimit=False) self.settings_screen.ids.settings_identity_hash.text = RNS.hexrep(self.sideband.lxmf_destination.identity.hash, delimit=False) @@ -2131,6 +2813,9 @@ class SidebandApp(MDApp): self.settings_screen.ids.display_style_in_contact_list.active = self.sideband.config["display_style_in_contact_list"] self.settings_screen.ids.display_style_in_contact_list.bind(active=save_display_style_in_contact_list) + self.settings_screen.ids.display_style_from_trusted_only.active = not self.sideband.config["display_style_from_all"] + self.settings_screen.ids.display_style_from_trusted_only.bind(active=save_display_style_from_trusted_only) + self.settings_screen.ids.settings_advanced_statistics.active = self.sideband.config["advanced_stats"] self.settings_screen.ids.settings_advanced_statistics.bind(active=save_advanced_stats) @@ -2146,6 +2831,9 @@ class SidebandApp(MDApp): self.settings_screen.ids.settings_lxmf_ignore_unknown.active = self.sideband.config["lxmf_ignore_unknown"] self.settings_screen.ids.settings_lxmf_ignore_unknown.bind(active=save_lxmf_ignore_unknown) + self.settings_screen.ids.settings_ignore_invalid_stamps.active = self.sideband.config["lxmf_ignore_invalid_stamps"] + self.settings_screen.ids.settings_ignore_invalid_stamps.bind(active=save_lxmf_ignore_invalid_stamps) + self.settings_screen.ids.settings_lxmf_periodic_sync.active = self.sideband.config["lxmf_periodic_sync"] self.settings_screen.ids.settings_lxmf_periodic_sync.bind(active=save_lxmf_periodic_sync) save_lxmf_periodic_sync(save=False) @@ -2157,6 +2845,22 @@ class SidebandApp(MDApp): self.settings_screen.ids.settings_lxmf_sync_interval.value = self.interval_to_slider_val(self.sideband.config["lxmf_sync_interval"]) sync_interval_change(save=False) + self.settings_screen.ids.settings_lxmf_require_stamps.active = self.sideband.config["lxmf_require_stamps"] + self.settings_screen.ids.settings_lxmf_require_stamps.bind(active=save_lxmf_stamp_cost) + save_lxmf_stamp_cost(save=False) + + def stamp_cost_change_cb(sender=None, event=None): + stamp_cost_change(sender=sender, event=event, save=False) + self.settings_screen.ids.settings_lxmf_require_stamps_cost.bind(value=stamp_cost_change_cb) + self.settings_screen.ids.settings_lxmf_require_stamps_cost.bind(on_touch_up=stamp_cost_change) + cost_val = self.sideband.config["lxmf_inbound_stamp_cost"] + if cost_val == None or cost_val < 1: + cost_val = 1 + if cost_val > 32: + cost_val = 32 + self.settings_screen.ids.settings_lxmf_require_stamps_cost.value = cost_val + stamp_cost_change(save=False) + if self.sideband.config["lxmf_sync_limit"] == None or self.sideband.config["lxmf_sync_limit"] == False: sync_limit = False else: @@ -2195,7 +2899,7 @@ class SidebandApp(MDApp): elif input_lang == "korean": self.settings_screen.ids.settings_lang_korean.active = True self.settings_screen.ids.settings_display_name.font_name = "korean" - elif input_lang == "devangari": + elif 
input_lang == "combined": self.settings_screen.ids.settings_lang_devangari.active = True self.settings_screen.ids.settings_display_name.font_name = "combined" elif input_lang == "hebrew": @@ -5026,22 +5730,22 @@ class SidebandApp(MDApp): def close_sub_map_action(self, sender=None): self.map_action(direction="right") - def object_details_action(self, sender=None, from_conv=False, from_telemetry=False, source_dest=None, direction="left"): + def object_details_action(self, sender=None, from_conv=False, from_objects=False, from_telemetry=False, source_dest=None, direction="left"): if self.root.ids.screen_manager.has_screen("object_details_screen"): - self.object_details_open(sender=sender, from_conv=from_conv, from_telemetry=from_telemetry, source_dest=source_dest, direction=direction) + self.object_details_open(sender=sender, from_conv=from_conv, from_objects=from_objects, from_telemetry=from_telemetry, source_dest=source_dest, direction=direction) else: self.loader_action(direction=direction) def final(dt): self.object_details_init() def o(dt): - self.object_details_open(sender=sender, from_conv=from_conv, from_telemetry=from_telemetry, source_dest=source_dest, no_transition=True) + self.object_details_open(sender=sender, from_conv=from_conv, from_objects=from_objects, from_telemetry=from_telemetry, source_dest=source_dest, no_transition=True) Clock.schedule_once(o, ll_ot) Clock.schedule_once(final, ll_ft) def object_details_init(self): self.object_details_screen = ObjectDetails(self) - def object_details_open(self, sender=None, from_conv=False, from_telemetry=False, source_dest=None, direction="left", no_transition=False): + def object_details_open(self, sender=None, from_conv=False, from_objects=False, from_telemetry=False, source_dest=None, direction="left", no_transition=False): if no_transition: self.root.ids.screen_manager.transition = self.no_transition else: @@ -5062,10 +5766,13 @@ class SidebandApp(MDApp): self.root.ids.nav_drawer.set_state("closed") if telemetry_source == None: - self.conversations_action(direction="right") + if self.include_objects and not self.include_conversations: + self.objects_action(direction="right") + else: + self.conversations_action(direction="right") else: - Clock.schedule_once(lambda dt: self.object_details_screen.set_source(telemetry_source, from_conv=from_conv, from_telemetry=from_telemetry), 0.0) + Clock.schedule_once(lambda dt: self.object_details_screen.set_source(telemetry_source, from_conv=from_conv, from_objects=from_objects, from_telemetry=from_telemetry), 0.0) def vj(dt): self.root.ids.screen_manager.current = "object_details_screen" @@ -5307,28 +6014,58 @@ The Propagation Nodes also distribute copies of messages between each other, suc If you use Reticulum and LXMF on hardware that does not carry any identifiers tied to you, it is possible to establish a completely free and anonymous communication system with Reticulum and LXMF clients.""" guide_text8 = """ -[size=18dp][b]Keyboard Shortcuts[/b][/size][size=5dp]\n \n[/size] - Ctrl+Q or Ctrl-W Shut down Sideband - - Ctrl-D or Ctrl-S Send message - - Ctrl-R Show Conversations - - Ctrl-L Show Announce Stream - - Ctrl-M Show Situation Map - - Ctrl-T Show Telemetry Setup - - Ctrl-N New conversation - - Ctrl-G Show guide""" +[size=18dp][b]Keyboard Shortcuts[/b][/size][size=5dp]\n \n[/size]To ease navigation and operation of the program, Sideband has keyboard shortcuts mapped to the most common actions. A reference is included below. 
+ +[b]Quick Actions[/b] + - [b]Ctrl-W[/b] Go back + - [b]Ctrl-Q[/b] Shut down Sideband + - [b]Ctrl-R[/b] Start LXMF sync (from Conversations screen) + - [b]Ctrl-N[/b] Create new conversation + + [b]Message Actions[/b] + - [b]Ctrl-Shift-A[/b] Add message attachment + - [b]Ctrl-Shift-V[/b] Add high-quality voice + - [b]Ctrl-Shift-C[/b] Add low-bandwidth voice + - [b]Ctrl-Shift-I[/b] Add medium-quality image + - [b]Ctrl-Shift-F[/b] Add file + - [b]Ctrl-D[/b] or [b]Ctrl-S[/b] Send message + + [b]Voice Recording[/b] + - [b]Space[/b] Start/stop recording + - [b]Enter[/b] Save recording to message + + [b]Navigation[/b] + - [b]Ctrl-[i]n[/i][/b] Go to conversation number [i]n[/i] + - [b]Ctrl-R[/b] Go to Conversations + - [b]Ctrl-O[/b] Go to Objects & Devices + - [b]Ctrl-L[/b] Go to Announce Stream + - [b]Ctrl-M[/b] Go to Situation Map + - [b]Ctrl-T[/b] Go to Telemetry configuration + - [b]Ctrl-G[/b] Go to Guide + - [b]Ctrl-U[/b] Display own telemetry + +[b]Map Controls[/b] + - [b]Up[/b], [b]Down[/b], [b]Left[/b], [b]Right[/b] Navigate + - [b]W[/b], [b]A[/b], [b]S[/b], [b]D[/b] Navigate + - [b]H[/b], [b]J[/b], [b]K[/b], [b]L[/b] Navigate + - [b]E[/b] or [b]+[/b] Zoom in + - [b]Q[/b] or [b]-[/b] Zoom out + - Hold [b]Shift[/b] to navigate more coarsely + - Hold [b]Alt[/b] to navigate more finely""" guide_text9 = """ -[size=18dp][b]Sow Seeds Of Freedom[/b][/size][size=5dp]\n \n[/size]It took me more than seven years to design and built the entire ecosystem of software and hardware that makes this possible. If this project is valuable to you, please go to [u][ref=link]https://unsigned.io/donate[/ref][/u] to support the project with a donation. Every donation directly makes the entire Reticulum project possible. +[size=18dp][b]Please Support This Project[/b][/size][size=5dp]\n \n[/size]It took me more than seven years to design and build the entire ecosystem of software and hardware that makes this possible. If this project is valuable to you, please go to [u][ref=link]https://unsigned.io/donate[/ref][/u] to support the project with a donation. Every donation directly makes the entire Reticulum project possible. Thank you very much for using Free Communications Systems. 
""" info1 = guide_text1 - info2 = guide_text2 - info3 = guide_text3 - info4 = guide_text4 - info5 = guide_text5 - info6 = guide_text6 - info7 = guide_text7 - info8 = guide_text8 + info2 = guide_text8 + info3 = guide_text2 + info4 = guide_text3 + info5 = guide_text4 + info6 = guide_text5 + info7 = guide_text6 + info8 = guide_text7 info9 = guide_text9 if self.theme_cls.theme_style == "Dark": @@ -5407,6 +6144,7 @@ class CustomOneLineIconListItem(OneLineIconListItem): class DialogItem(OneLineIconListItem): divider = None + icon = StringProperty() class MDMapIconButton(MDIconButton): pass @@ -5436,6 +6174,7 @@ def run(): is_daemon=True ) + sideband.version_str = "v"+__version__+" "+__variant__ sideband.start() while True: time.sleep(5) diff --git a/sbapp/patches/AndroidManifest.tmpl.xml b/sbapp/patches/AndroidManifest.tmpl.xml index 6cd45c6..226a8ae 100644 --- a/sbapp/patches/AndroidManifest.tmpl.xml +++ b/sbapp/patches/AndroidManifest.tmpl.xml @@ -3,7 +3,6 @@ com.gamemaker.game --> @@ -46,16 +45,7 @@ {{ args.extra_manifest_xml }} - - + - {% if args.launcher %} + {% if args.launcher %} - {% else %} - - {% endif %} + {% if args.home_app %} + + + {% endif %} + + {%- if args.intent_filters -%} {{- args.intent_filters -}} {%- endif -%} diff --git a/sbapp/patches/PythonService.java b/sbapp/patches/PythonService.java index d59fe1c..48fc6ff 100644 --- a/sbapp/patches/PythonService.java +++ b/sbapp/patches/PythonService.java @@ -113,7 +113,7 @@ public class PythonService extends Service implements Runnable { Context context = getApplicationContext(); Intent contextIntent = new Intent(context, PythonActivity.class); PendingIntent pIntent = PendingIntent.getActivity(context, 0, contextIntent, - PendingIntent.FLAG_UPDATE_CURRENT); + PendingIntent.FLAG_IMMUTABLE | PendingIntent.FLAG_UPDATE_CURRENT); if (Build.VERSION.SDK_INT < Build.VERSION_CODES.O) { notification = new Notification( diff --git a/sbapp/patches/device_filter.xml b/sbapp/patches/device_filter.xml index b003690..ea67a38 100644 --- a/sbapp/patches/device_filter.xml +++ b/sbapp/patches/device_filter.xml @@ -39,4 +39,7 @@ + + + \ No newline at end of file diff --git a/sbapp/plyer/__init__.py b/sbapp/plyer/__init__.py index ab87901..9bfb190 100644 --- a/sbapp/plyer/__init__.py +++ b/sbapp/plyer/__init__.py @@ -13,11 +13,15 @@ __all__ = ( 'stt', 'temperature', 'tts', 'uniqueid', 'vibrator', 'wifi', 'devicename' ) -__version__ = '2.1.0.dev0' +__version__ = '2.2.0.dev0' - -from plyer import facades -from plyer.utils import Proxy +import RNS +if RNS.vendor.platformutils.is_android(): + from plyer import facades + from plyer.utils import Proxy +else: + from sbapp.plyer import facades + from sbapp.plyer.utils import Proxy #: Accelerometer proxy to :class:`plyer.facades.Accelerometer` accelerometer = Proxy('accelerometer', facades.Accelerometer) diff --git a/sbapp/plyer/facades/__init__.py b/sbapp/plyer/facades/__init__.py index c1e2560..81a0f63 100644 --- a/sbapp/plyer/facades/__init__.py +++ b/sbapp/plyer/facades/__init__.py @@ -14,38 +14,76 @@ __all__ = ('Accelerometer', 'Audio', 'Barometer', 'Battery', 'Call', 'Camera', 'Processors', 'StoragePath', 'Keystore', 'Bluetooth', 'Screenshot', 'STT', 'DeviceName') -from plyer.facades.accelerometer import Accelerometer -from plyer.facades.audio import Audio -from plyer.facades.barometer import Barometer -from plyer.facades.battery import Battery -from plyer.facades.call import Call -from plyer.facades.camera import Camera -from plyer.facades.compass import Compass -from plyer.facades.email import Email 
-from plyer.facades.filechooser import FileChooser -from plyer.facades.flash import Flash -from plyer.facades.gps import GPS -from plyer.facades.gravity import Gravity -from plyer.facades.gyroscope import Gyroscope -from plyer.facades.irblaster import IrBlaster -from plyer.facades.light import Light -from plyer.facades.proximity import Proximity -from plyer.facades.orientation import Orientation -from plyer.facades.notification import Notification -from plyer.facades.sms import Sms -from plyer.facades.stt import STT -from plyer.facades.tts import TTS -from plyer.facades.uniqueid import UniqueID -from plyer.facades.vibrator import Vibrator -from plyer.facades.wifi import Wifi -from plyer.facades.temperature import Temperature -from plyer.facades.humidity import Humidity -from plyer.facades.spatialorientation import SpatialOrientation -from plyer.facades.brightness import Brightness -from plyer.facades.keystore import Keystore -from plyer.facades.storagepath import StoragePath -from plyer.facades.bluetooth import Bluetooth -from plyer.facades.processors import Processors -from plyer.facades.cpu import CPU -from plyer.facades.screenshot import Screenshot -from plyer.facades.devicename import DeviceName +import RNS +if RNS.vendor.platformutils.is_android(): + from plyer.facades.accelerometer import Accelerometer + from plyer.facades.audio import Audio + from plyer.facades.barometer import Barometer + from plyer.facades.battery import Battery + from plyer.facades.call import Call + from plyer.facades.camera import Camera + from plyer.facades.compass import Compass + from plyer.facades.email import Email + from plyer.facades.filechooser import FileChooser + from plyer.facades.flash import Flash + from plyer.facades.gps import GPS + from plyer.facades.gravity import Gravity + from plyer.facades.gyroscope import Gyroscope + from plyer.facades.irblaster import IrBlaster + from plyer.facades.light import Light + from plyer.facades.proximity import Proximity + from plyer.facades.orientation import Orientation + from plyer.facades.notification import Notification + from plyer.facades.sms import Sms + from plyer.facades.stt import STT + from plyer.facades.tts import TTS + from plyer.facades.uniqueid import UniqueID + from plyer.facades.vibrator import Vibrator + from plyer.facades.wifi import Wifi + from plyer.facades.temperature import Temperature + from plyer.facades.humidity import Humidity + from plyer.facades.spatialorientation import SpatialOrientation + from plyer.facades.brightness import Brightness + from plyer.facades.keystore import Keystore + from plyer.facades.storagepath import StoragePath + from plyer.facades.bluetooth import Bluetooth + from plyer.facades.processors import Processors + from plyer.facades.cpu import CPU + from plyer.facades.screenshot import Screenshot + from plyer.facades.devicename import DeviceName +else: + from sbapp.plyer.facades.accelerometer import Accelerometer + from sbapp.plyer.facades.audio import Audio + from sbapp.plyer.facades.barometer import Barometer + from sbapp.plyer.facades.battery import Battery + from sbapp.plyer.facades.call import Call + from sbapp.plyer.facades.camera import Camera + from sbapp.plyer.facades.compass import Compass + from sbapp.plyer.facades.email import Email + from sbapp.plyer.facades.filechooser import FileChooser + from sbapp.plyer.facades.flash import Flash + from sbapp.plyer.facades.gps import GPS + from sbapp.plyer.facades.gravity import Gravity + from sbapp.plyer.facades.gyroscope import Gyroscope + from 
sbapp.plyer.facades.irblaster import IrBlaster + from sbapp.plyer.facades.light import Light + from sbapp.plyer.facades.proximity import Proximity + from sbapp.plyer.facades.orientation import Orientation + from sbapp.plyer.facades.notification import Notification + from sbapp.plyer.facades.sms import Sms + from sbapp.plyer.facades.stt import STT + from sbapp.plyer.facades.tts import TTS + from sbapp.plyer.facades.uniqueid import UniqueID + from sbapp.plyer.facades.vibrator import Vibrator + from sbapp.plyer.facades.wifi import Wifi + from sbapp.plyer.facades.temperature import Temperature + from sbapp.plyer.facades.humidity import Humidity + from sbapp.plyer.facades.spatialorientation import SpatialOrientation + from sbapp.plyer.facades.brightness import Brightness + from sbapp.plyer.facades.keystore import Keystore + from sbapp.plyer.facades.storagepath import StoragePath + from sbapp.plyer.facades.bluetooth import Bluetooth + from sbapp.plyer.facades.processors import Processors + from sbapp.plyer.facades.cpu import CPU + from sbapp.plyer.facades.screenshot import Screenshot + from sbapp.plyer.facades.devicename import DeviceName diff --git a/sbapp/plyer/facades/audio.py b/sbapp/plyer/facades/audio.py index c5e1db5..0394037 100644 --- a/sbapp/plyer/facades/audio.py +++ b/sbapp/plyer/facades/audio.py @@ -94,6 +94,7 @@ class Audio: # private def _start(self): + raise IOError("JUICE") raise NotImplementedError() def _stop(self): diff --git a/sbapp/plyer/facades/humidity.py b/sbapp/plyer/facades/humidity.py index ac63333..8d72239 100644 --- a/sbapp/plyer/facades/humidity.py +++ b/sbapp/plyer/facades/humidity.py @@ -4,7 +4,7 @@ class Humidity: With method `enable` you can turn on Humidity sensor and 'disable' method stops the sensor. Use property `tell` to get humidity value. - + Supported Platforms ------------------- Android diff --git a/sbapp/plyer/facades/maps.py b/sbapp/plyer/facades/maps.py new file mode 100644 index 0000000..ba8d5fe --- /dev/null +++ b/sbapp/plyer/facades/maps.py @@ -0,0 +1,88 @@ +''' +Maps +======= +The :class:`Maps` creates a client for accessing the default Maps API. + +Holds features such as opening a location by +address & latitude/longitude, create queries, or find directions between +two points + +Simple Examples +--------------- + +Perform a search:: + + >>> from plyer import maps + >>> maps.search('Mexican Restaurant') + >>> maps.search('Taco Bell', latitude=38.5810606, longitude=-121.493895) + +Get directions to a location:: + + >>> from plyer import maps + >>> maps.route('Cupertino', 'San Francisco') + >>> maps.route('41.9156316,-72.6130726', '42.65228271484,-73.7577362060') + +View a specific location:: + + >>> from plyer import maps + >>> maps.open_by_address('25 Leshin Lane, Hightstown, NJ') + >>> maps.open_by_lat_long(30.451468, -91.187149) + >>> maps.open_by_lat_long(30.451468, -91.187149, name='Home') + +Supported Platforms +------------------- +macOS, iOS +--------------- +''' + + +class Maps: + ''' + Maps facade. + ''' + + def open_by_address(self, address, **kwargs): + ''' + Open the specificed location by address in the default Maps API + ''' + self._open_by_address(address, **kwargs) + + def open_by_lat_long(self, latitude, longitude, **kwargs): + ''' + Open the specificed location by latitude & longitude coordinates + in the default Maps API + ''' + self._open_by_lat_long(latitude, longitude, **kwargs) + + def search(self, query, **kwargs): + ''' + The query. 
This parameter is treated as if its value had been typed + into the Maps search field by the user. + + Note that query=* is not supported + ''' + self._search(query, **kwargs) + + def route(self, saddr, daddr, **kwargs): + ''' + To provide navigation directions from one location to another. + + :param saddr: The source address to be used as the starting + point for directions. + + :param daddr: The destination address to be used as the + destination point for directions. + ''' + self._route(saddr, daddr, **kwargs) + + def _open_by_address(self, address, **kwargs): + raise NotImplementedError() + + def _open_by_lat_long(self, latitude, longitude, **kwargs): + raise NotImplementedError() + + def _search(self, query, **kwargs): + raise NotImplementedError() + + def _route(self, saddr, daddr, **kwargs): + raise NotImplementedError() diff --git a/sbapp/plyer/facades/notification.py b/sbapp/plyer/facades/notification.py index 71c7d53..db0d227 100644 --- a/sbapp/plyer/facades/notification.py +++ b/sbapp/plyer/facades/notification.py @@ -45,8 +45,8 @@ class Notification: Notification facade. ''' - def notify(self, title='', message='', app_name='', app_icon='', notification_icon=None, - timeout=10, ticker='', toast=False, hints={}, context_override=None): + def notify(self, title='', message='', app_name='', app_icon='', + timeout=10, ticker='', toast=False, hints={}): ''' Send a notification. @@ -83,8 +83,8 @@ class Notification: self._notify( title=title, message=message, - app_icon=app_icon, app_name=app_name, notification_icon=notification_icon, - timeout=timeout, ticker=ticker, toast=toast, hints=hints, context_override=context_override + app_icon=app_icon, app_name=app_name, + timeout=timeout, ticker=ticker, toast=toast, hints=hints ) # private diff --git a/sbapp/plyer/facades/orientation.py b/sbapp/plyer/facades/orientation.py index 56ea0ba..077de55 100644 --- a/sbapp/plyer/facades/orientation.py +++ b/sbapp/plyer/facades/orientation.py @@ -30,7 +30,7 @@ To set sensor:: Supported Platforms ------------------- -Android +Android, Linux ''' diff --git a/sbapp/plyer/facades/sms.py b/sbapp/plyer/facades/sms.py index 0e58f44..5940822 100644 --- a/sbapp/plyer/facades/sms.py +++ b/sbapp/plyer/facades/sms.py @@ -23,7 +23,7 @@ To send sms:: Supported Platforms ------------------- -Android, iOS +Android, iOS, macOS ''' @@ -33,17 +33,23 @@ class Sms: Sms facade. ''' - def send(self, recipient, message): + def send(self, recipient, message, mode=None, **kwargs): ''' Send SMS or open SMS interface. + Includes optional `mode` parameter for macOS that can be set to + `'SMS'` if carrier-activated device is correctly paired and + configured to macOS. 
:param recipient: The receiver :param message: the message + :param mode: (optional, macOS only), can be set to 'iMessage' + (default) or 'SMS' :type recipient: number :type message: str + :type mode: str ''' - self._send(recipient=recipient, message=message) + self._send(recipient=recipient, message=message, mode=mode, **kwargs) # private diff --git a/sbapp/plyer/platforms/android/accelerometer.py b/sbapp/plyer/platforms/android/accelerometer.py index 00c73cf..7754579 100644 --- a/sbapp/plyer/platforms/android/accelerometer.py +++ b/sbapp/plyer/platforms/android/accelerometer.py @@ -70,7 +70,7 @@ class AndroidAccelerometer(Accelerometer): return (None, None, None) def __del__(self): - if(self.bState): + if self.bState: self._disable() super().__del__() diff --git a/sbapp/plyer/platforms/android/audio.py b/sbapp/plyer/platforms/android/audio.py index 9f000ff..33a0a0d 100644 --- a/sbapp/plyer/platforms/android/audio.py +++ b/sbapp/plyer/platforms/android/audio.py @@ -1,3 +1,5 @@ +import time +import threading from jnius import autoclass from plyer.facades.audio import Audio @@ -20,17 +22,45 @@ class AndroidAudio(Audio): ''' def __init__(self, file_path=None): - default_path = '/sdcard/testrecorder.3gp' + default_path = None super().__init__(file_path or default_path) self._recorder = None self._player = None + self._check_thread = None + self._finished_callback = None + self._format = "opus" + self.is_playing = False + + def _check_playback(self): + while self._player and self._player.isPlaying(): + time.sleep(0.25) + + self.is_playing = False + + if self._finished_callback and callable(self._finished_callback): + self._check_thread = None + self._finished_callback(self) + def _start(self): self._recorder = MediaRecorder() - self._recorder.setAudioSource(AudioSource.DEFAULT) - self._recorder.setOutputFormat(OutputFormat.DEFAULT) - self._recorder.setAudioEncoder(AudioEncoder.DEFAULT) + if self._format == "aac": + self._recorder.setAudioSource(AudioSource.DEFAULT) + self._recorder.setAudioSamplingRate(48000) + self._recorder.setAudioEncodingBitRate(64000) + self._recorder.setAudioChannels(1) + self._recorder.setOutputFormat(OutputFormat.MPEG_4) + self._recorder.setAudioEncoder(AudioEncoder.AAC) + + else: + self._recorder.setAudioSource(AudioSource.DEFAULT) + self._recorder.setAudioSamplingRate(48000) + self._recorder.setAudioEncodingBitRate(12000) + self._recorder.setAudioChannels(1) + self._recorder.setOutputFormat(OutputFormat.OGG) + self._recorder.setAudioEncoder(AudioEncoder.OPUS) + self._recorder.setOutputFile(self.file_path) self._recorder.prepare() @@ -38,20 +68,40 @@ class AndroidAudio(Audio): def _stop(self): if self._recorder: - self._recorder.stop() - self._recorder.release() + try: + self._recorder.stop() + self._recorder.release() + except Exception as e: + print("Could not stop recording: "+str(e)) + self._recorder = None if self._player: - self._player.stop() - self._player.release() + try: + self._player.stop() + self._player.release() + except Exception as e: + print("Could not stop playback: "+str(e)) + self._player = None + self.is_playing = False + def _play(self): self._player = MediaPlayer() self._player.setDataSource(self.file_path) self._player.prepare() self._player.start() + self.is_playing = True + + self._check_thread = threading.Thread(target=self._check_playback, daemon=True) + self._check_thread.start() + + def reload(self): + self._stop() + + def playing(self): + return self.is_playing def instance(): diff --git a/sbapp/plyer/platforms/android/camera.py 
b/sbapp/plyer/platforms/android/camera.py index 19707bd..3384a66 100644 --- a/sbapp/plyer/platforms/android/camera.py +++ b/sbapp/plyer/platforms/android/camera.py @@ -14,7 +14,7 @@ Uri = autoclass('android.net.Uri') class AndroidCamera(Camera): def _take_picture(self, on_complete, filename=None): - assert(on_complete is not None) + assert on_complete is not None self.on_complete = on_complete self.filename = filename android.activity.unbind(on_activity_result=self._on_activity_result) @@ -26,7 +26,7 @@ class AndroidCamera(Camera): activity.startActivityForResult(intent, 0x123) def _take_video(self, on_complete, filename=None): - assert(on_complete is not None) + assert on_complete is not None self.on_complete = on_complete self.filename = filename android.activity.unbind(on_activity_result=self._on_activity_result) diff --git a/sbapp/plyer/platforms/android/compass.py b/sbapp/plyer/platforms/android/compass.py index fcf0483..5bb16c6 100644 --- a/sbapp/plyer/platforms/android/compass.py +++ b/sbapp/plyer/platforms/android/compass.py @@ -110,7 +110,7 @@ class AndroidCompass(Compass): return (None, None, None, None, None, None) def __del__(self): - if(self.bState): + if self.bState: self._disable() super().__del__() diff --git a/sbapp/plyer/platforms/android/filechooser.py b/sbapp/plyer/platforms/android/filechooser.py index b809ccb..b8c943c 100644 --- a/sbapp/plyer/platforms/android/filechooser.py +++ b/sbapp/plyer/platforms/android/filechooser.py @@ -43,7 +43,7 @@ using that result will use an incorrect one i.e. the default value of .. versionadded:: 1.4.0 ''' -from os.path import join, basename +from os.path import join from random import randint from android import activity, mActivity @@ -62,6 +62,8 @@ Long = autoclass('java.lang.Long') IMedia = autoclass('android.provider.MediaStore$Images$Media') VMedia = autoclass('android.provider.MediaStore$Video$Media') AMedia = autoclass('android.provider.MediaStore$Audio$Media') +Files = autoclass('android.provider.MediaStore$Files') +FileOutputStream = autoclass('java.io.FileOutputStream') class AndroidFileChooser(FileChooser): @@ -74,6 +76,7 @@ class AndroidFileChooser(FileChooser): # filechooser activity <-> result pair identification select_code = None + save_code = None # default selection value selection = None @@ -105,6 +108,7 @@ class AndroidFileChooser(FileChooser): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.select_code = randint(123456, 654321) + self.save_code = randint(123456, 654321) self.selection = None # bind a function for a response from filechooser activity @@ -139,9 +143,11 @@ class AndroidFileChooser(FileChooser): # create Intent for opening file_intent = Intent(Intent.ACTION_GET_CONTENT) - if not self.selected_mime_type or \ - type(self.selected_mime_type) != str or \ - self.selected_mime_type not in self.mime_type: + if ( + not self.selected_mime_type + or not isinstance(self.selected_mime_type, str) + or self.selected_mime_type not in self.mime_type + ): file_intent.setType("*/*") else: file_intent.setType(self.mime_type[self.selected_mime_type]) @@ -163,6 +169,38 @@ class AndroidFileChooser(FileChooser): self.select_code ) + def _save_file(self, **kwargs): + self._save_callback = kwargs.pop("callback") + + title = kwargs.pop("title", None) + + self.selected_mime_type = \ + kwargs.pop("filters")[0] if "filters" in kwargs else "" + + file_intent = Intent(Intent.ACTION_CREATE_DOCUMENT) + if ( + not self.selected_mime_type + or not isinstance(self.selected_mime_type, str) + or 
self.selected_mime_type not in self.mime_type + ): + file_intent.setType("*/*") + else: + file_intent.setType(self.mime_type[self.selected_mime_type]) + file_intent.addCategory( + Intent.CATEGORY_OPENABLE + ) + + if title: + file_intent.putExtra(Intent.EXTRA_TITLE, title) + + mActivity.startActivityForResult( + Intent.createChooser(file_intent, cast( + 'java.lang.CharSequence', + String("FileChooser") + )), + self.save_code + ) + def _on_activity_result(self, request_code, result_code, data): ''' Listener for ``android.app.Activity.onActivityResult()`` assigned @@ -171,28 +209,41 @@ class AndroidFileChooser(FileChooser): .. versionadded:: 1.4.0 ''' - # not our response - if request_code != self.select_code: + # bad data + if data is None: return if result_code != Activity.RESULT_OK: # The action had been cancelled. return - selection = [] - # Process multiple URI if multiple files selected - try: - for count in range(data.getClipData().getItemCount()): - ele = self._resolve_uri( - data.getClipData().getItemAt(count).getUri()) or [] - selection.append(ele) - except Exception: - selection = [self._resolve_uri(data.getData()), ] + if request_code == self.select_code: + selection = [] + # Process multiple URI if multiple files selected + try: + for count in range(data.getClipData().getItemCount()): + ele = self._resolve_uri( + data.getClipData().getItemAt(count).getUri()) or [] + selection.append(ele) + except Exception: + selection = [self._resolve_uri(data.getData()), ] - # return value to object - self.selection = selection - # return value via callback - self._handle_selection(selection) + # return value to object + self.selection = selection + # return value via callback + self._handle_selection(selection) + + elif request_code == self.save_code: + uri = data.getData() + + with mActivity.getContentResolver().openFileDescriptor( + uri, "w" + ) as pfd: + with FileOutputStream( + pfd.getFileDescriptor() + ) as fileOutputStream: + # return value via callback + self._save_callback(fileOutputStream) @staticmethod def _handle_external_documents(uri): @@ -206,28 +257,19 @@ class AndroidFileChooser(FileChooser): file_id = DocumentsContract.getDocumentId(uri) file_type, file_name = file_id.split(':') - # internal SD card mostly mounted as a files storage in phone - internal = storagepath.get_external_storage_dir() + primary_storage = storagepath.get_external_storage_dir() + sdcard_storage = storagepath.get_sdcard_dir() - # external (removable) SD card i.e. 
microSD - external = storagepath.get_sdcard_dir() - try: - external_base = basename(external) - except TypeError: - external_base = basename(internal) + directory = primary_storage - # resolve sdcard path - sd_card = internal - - # because external might have /storage/.../1 or other suffix - # and file_type might be only a part of the real folder in /storage - if file_type in external_base or external_base in file_type: - sd_card = external + if file_type == "primary": + directory = primary_storage elif file_type == "home": - sd_card = join(Environment.getExternalStorageDirectory( - ).getAbsolutePath(), Environment.DIRECTORY_DOCUMENTS) + directory = join(primary_storage, Environment.DIRECTORY_DOCUMENTS) + elif sdcard_storage and file_type in sdcard_storage: + directory = sdcard_storage - return join(sd_card, file_name) + return join(directory, file_name) @staticmethod def _handle_media_documents(uri): @@ -248,6 +290,11 @@ class AndroidFileChooser(FileChooser): uri = VMedia.EXTERNAL_CONTENT_URI elif file_type == 'audio': uri = AMedia.EXTERNAL_CONTENT_URI + + # Other file type was selected (probably in the Documents folder) + else: + uri = Files.getContentUri("external") + return file_name, selection, uri @staticmethod @@ -279,6 +326,23 @@ class AndroidFileChooser(FileChooser): .. versionadded:: 1.4.0 ''' + try: + download_dir = Environment.getExternalStoragePublicDirectory( + Environment.DIRECTORY_DOWNLOADS + ).getPath() + path = AndroidFileChooser._parse_content( + uri=uri, + projection=["_display_name"], + selection=None, + selection_args=None, + sort_order=None, + ) + return join(download_dir, path) + + except Exception: + import traceback + traceback.print_exc() + # known locations, differ between machines downloads = [ 'content://downloads/public_downloads', @@ -441,6 +505,8 @@ class AndroidFileChooser(FileChooser): mode = kwargs.pop('mode', None) if mode == 'open': self._open_file(**kwargs) + elif mode == 'save': + self._save_file(**kwargs) def instance(): diff --git a/sbapp/plyer/platforms/android/gps.py b/sbapp/plyer/platforms/android/gps.py index b173876..740b636 100644 --- a/sbapp/plyer/platforms/android/gps.py +++ b/sbapp/plyer/platforms/android/gps.py @@ -86,4 +86,4 @@ class AndroidGPS(GPS): def instance(): - return AndroidGPS() \ No newline at end of file + return AndroidGPS() diff --git a/sbapp/plyer/platforms/android/gyroscope.py b/sbapp/plyer/platforms/android/gyroscope.py index d6382b1..99b224e 100644 --- a/sbapp/plyer/platforms/android/gyroscope.py +++ b/sbapp/plyer/platforms/android/gyroscope.py @@ -110,7 +110,7 @@ class AndroidGyroscope(Gyroscope): return (None, None, None, None, None, None) def __del__(self): - if(self.bState): + if self.bState: self._disable() super().__del__() diff --git a/sbapp/plyer/platforms/android/notification.py b/sbapp/plyer/platforms/android/notification.py index ac9f76b..ee0de2d 100644 --- a/sbapp/plyer/platforms/android/notification.py +++ b/sbapp/plyer/platforms/android/notification.py @@ -154,10 +154,15 @@ class AndroidNotification(Notification): notification_intent.setFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP) notification_intent.setAction(Intent.ACTION_MAIN) notification_intent.addCategory(Intent.CATEGORY_LAUNCHER) + if SDK_INT >= 23: + # FLAG_IMMUTABLE added in SDK 23, required since SDK 31: + pending_flags = PendingIntent.FLAG_IMMUTABLE + else: + pending_flags = 0 # get our application Activity pending_intent = PendingIntent.getActivity( - app_context, 0, notification_intent, 0 + app_context, 0, notification_intent, pending_flags ) 
notification.setContentIntent(pending_intent) @@ -179,8 +184,6 @@ class AndroidNotification(Notification): kwargs.get('title', '').encode('utf-8') ) icon = kwargs.get('app_icon') - notification_icon = kwargs.get('notification_icon') - context_override = kwargs.get('context_override') # decide whether toast only or proper notification if kwargs.get('toast'): diff --git a/sbapp/plyer/platforms/android/storagepath.py b/sbapp/plyer/platforms/android/storagepath.py index 788e3fc..d18ea61 100755 --- a/sbapp/plyer/platforms/android/storagepath.py +++ b/sbapp/plyer/platforms/android/storagepath.py @@ -3,14 +3,13 @@ Android Storage Path -------------------- ''' -from os import listdir, access, R_OK -from os.path import join from plyer.facades import StoragePath -from jnius import autoclass +from plyer.platforms.android import SDK_INT +from jnius import autoclass, cast from android import mActivity -Environment = autoclass('android.os.Environment') -Context = autoclass('android.content.Context') +Environment = autoclass("android.os.Environment") +Context = autoclass("android.content.Context") class AndroidStoragePath(StoragePath): @@ -25,17 +24,29 @@ class AndroidStoragePath(StoragePath): ''' .. versionadded:: 1.4.0 ''' - # folder in /storage/ that is readable - # and is not internal SD card path = None - for folder in listdir('/storage'): - folder = join('/storage', folder) - if folder in self._get_external_storage_dir(): - continue - if not access(folder, R_OK): - continue - path = folder - break + context = mActivity.getApplicationContext() + storage_manager = cast( + "android.os.storage.StorageManager", + context.getSystemService(Context.STORAGE_SERVICE), + ) + + if storage_manager is not None: + if SDK_INT >= 24: + storage_volumes = storage_manager.getStorageVolumes() + for storage_volume in storage_volumes: + if storage_volume.isRemovable(): + try: + directory = storage_volume.getDirectory() + except AttributeError: + directory = storage_volume.getPathFile() + path = directory.getAbsolutePath() + else: + storage_volumes = storage_manager.getVolumeList() + for storage_volume in storage_volumes: + if storage_volume.isRemovable(): + path = storage_volume.getPath() + return path def _get_root_dir(self): diff --git a/sbapp/plyer/platforms/ios/accelerometer.py b/sbapp/plyer/platforms/ios/accelerometer.py index 7250384..0f61f2b 100644 --- a/sbapp/plyer/platforms/ios/accelerometer.py +++ b/sbapp/plyer/platforms/ios/accelerometer.py @@ -6,7 +6,7 @@ Taken from: http://pyobjus.readthedocs.org/en/latest/pyobjus_ios.html \ #accessing-accelerometer ''' -from plyer.facades import Accelerometer +from sbapp.plyer.facades import Accelerometer from pyobjus import autoclass diff --git a/sbapp/plyer/platforms/ios/barometer.py b/sbapp/plyer/platforms/ios/barometer.py index a5c5dc3..9ec8d6e 100644 --- a/sbapp/plyer/platforms/ios/barometer.py +++ b/sbapp/plyer/platforms/ios/barometer.py @@ -3,7 +3,7 @@ iOS Barometer ------------- ''' -from plyer.facades import Barometer +from sbapp.plyer.facades import Barometer from pyobjus import autoclass diff --git a/sbapp/plyer/platforms/ios/battery.py b/sbapp/plyer/platforms/ios/battery.py index 818f0be..d7fa5d8 100644 --- a/sbapp/plyer/platforms/ios/battery.py +++ b/sbapp/plyer/platforms/ios/battery.py @@ -4,7 +4,7 @@ Module of iOS API for plyer.battery. 
from pyobjus import autoclass from pyobjus.dylib_manager import load_framework -from plyer.facades import Battery +from sbapp.plyer.facades import Battery load_framework('/System/Library/Frameworks/UIKit.framework') UIDevice = autoclass('UIDevice') diff --git a/sbapp/plyer/platforms/ios/brightness.py b/sbapp/plyer/platforms/ios/brightness.py index 065869a..f4a70f4 100644 --- a/sbapp/plyer/platforms/ios/brightness.py +++ b/sbapp/plyer/platforms/ios/brightness.py @@ -4,7 +4,7 @@ iOS Brightness ''' from pyobjus import autoclass -from plyer.facades import Brightness +from sbapp.plyer.facades import Brightness from pyobjus.dylib_manager import load_framework load_framework('/System/Library/Frameworks/UIKit.framework') diff --git a/sbapp/plyer/platforms/ios/call.py b/sbapp/plyer/platforms/ios/call.py index c751399..9999d6a 100644 --- a/sbapp/plyer/platforms/ios/call.py +++ b/sbapp/plyer/platforms/ios/call.py @@ -3,7 +3,7 @@ IOS Call ---------- ''' -from plyer.facades import Call +from sbapp.plyer.facades import Call from pyobjus import autoclass, objc_str NSURL = autoclass('NSURL') diff --git a/sbapp/plyer/platforms/ios/camera.py b/sbapp/plyer/platforms/ios/camera.py index dfe5265..a61bd96 100644 --- a/sbapp/plyer/platforms/ios/camera.py +++ b/sbapp/plyer/platforms/ios/camera.py @@ -1,7 +1,7 @@ from os import remove -from plyer.facades import Camera +from sbapp.plyer.facades import Camera -from plyer.utils import reify +from sbapp.plyer.utils import reify class iOSCamera(Camera): @@ -14,7 +14,7 @@ class iOSCamera(Camera): return PhotosLibrary() def _take_picture(self, on_complete, filename=None): - assert(on_complete is not None) + assert on_complete is not None self.on_complete = on_complete self.filename = filename photos = self.photos @@ -38,7 +38,7 @@ class iOSCamera(Camera): self._remove(self.filename) def _take_video(self, on_complete, filename=None): - assert(on_complete is not None) + assert on_complete is not None raise NotImplementedError def _remove(self, fn): diff --git a/sbapp/plyer/platforms/ios/compass.py b/sbapp/plyer/platforms/ios/compass.py index 855484b..a5c865f 100644 --- a/sbapp/plyer/platforms/ios/compass.py +++ b/sbapp/plyer/platforms/ios/compass.py @@ -3,7 +3,7 @@ iOS Compass ----------- ''' -from plyer.facades import Compass +from sbapp.plyer.facades import Compass from pyobjus import autoclass diff --git a/sbapp/plyer/platforms/ios/email.py b/sbapp/plyer/platforms/ios/email.py index e1cc7cb..ec54860 100644 --- a/sbapp/plyer/platforms/ios/email.py +++ b/sbapp/plyer/platforms/ios/email.py @@ -7,7 +7,7 @@ try: except ImportError: from urllib import quote -from plyer.facades import Email +from sbapp.plyer.facades import Email from pyobjus import autoclass, objc_str from pyobjus.dylib_manager import load_framework diff --git a/sbapp/plyer/platforms/ios/filechooser.py b/sbapp/plyer/platforms/ios/filechooser.py index ba49ba9..0320d6c 100644 --- a/sbapp/plyer/platforms/ios/filechooser.py +++ b/sbapp/plyer/platforms/ios/filechooser.py @@ -7,7 +7,7 @@ This module houses the iOS implementation of the plyer FileChooser. .. 
versionadded:: 1.4.4 ''' -from plyer.facades import FileChooser +from sbapp.plyer.facades import FileChooser from pyobjus import autoclass, protocol from pyobjus.dylib_manager import load_framework diff --git a/sbapp/plyer/platforms/ios/flash.py b/sbapp/plyer/platforms/ios/flash.py index e4bedac..17f59f1 100644 --- a/sbapp/plyer/platforms/ios/flash.py +++ b/sbapp/plyer/platforms/ios/flash.py @@ -3,7 +3,7 @@ Flash ----- """ -from plyer.facades import Flash +from sbapp.plyer.facades import Flash from pyobjus import autoclass NSString = autoclass("NSString") diff --git a/sbapp/plyer/platforms/ios/gps.py b/sbapp/plyer/platforms/ios/gps.py index 4c52738..d7ee0a7 100644 --- a/sbapp/plyer/platforms/ios/gps.py +++ b/sbapp/plyer/platforms/ios/gps.py @@ -5,7 +5,7 @@ iOS GPS from pyobjus import autoclass, protocol from pyobjus.dylib_manager import load_framework -from plyer.facades import GPS +from sbapp.plyer.facades import GPS load_framework('/System/Library/Frameworks/CoreLocation.framework') CLLocationManager = autoclass('CLLocationManager') diff --git a/sbapp/plyer/platforms/ios/gravity.py b/sbapp/plyer/platforms/ios/gravity.py index a2b1be0..9452bda 100644 --- a/sbapp/plyer/platforms/ios/gravity.py +++ b/sbapp/plyer/platforms/ios/gravity.py @@ -4,7 +4,7 @@ iOS Gravity ''' -from plyer.facades import Gravity +from sbapp.plyer.facades import Gravity from pyobjus import autoclass diff --git a/sbapp/plyer/platforms/ios/gyroscope.py b/sbapp/plyer/platforms/ios/gyroscope.py index 340653c..367e1bf 100644 --- a/sbapp/plyer/platforms/ios/gyroscope.py +++ b/sbapp/plyer/platforms/ios/gyroscope.py @@ -3,7 +3,7 @@ iOS Gyroscope --------------------- ''' -from plyer.facades import Gyroscope +from sbapp.plyer.facades import Gyroscope from pyobjus import autoclass from pyobjus.dylib_manager import load_framework diff --git a/sbapp/plyer/platforms/ios/keystore.py b/sbapp/plyer/platforms/ios/keystore.py index 289a5c0..a18cde2 100644 --- a/sbapp/plyer/platforms/ios/keystore.py +++ b/sbapp/plyer/platforms/ios/keystore.py @@ -1,4 +1,4 @@ -from plyer.facades import Keystore +from sbapp.plyer.facades import Keystore from pyobjus import autoclass, objc_str NSUserDefaults = autoclass('NSUserDefaults') diff --git a/sbapp/plyer/platforms/ios/maps.py b/sbapp/plyer/platforms/ios/maps.py new file mode 100644 index 0000000..dac01d5 --- /dev/null +++ b/sbapp/plyer/platforms/ios/maps.py @@ -0,0 +1,78 @@ +''' +Module of iOS API for plyer.maps. +''' + +import webbrowser +from sbapp.plyer.facades import Maps +from urllib.parse import quote_plus + + +class iOSMaps(Maps): + ''' + Implementation of iOS Maps API. + ''' + + def _open_by_address(self, address, **kwargs): + ''' + :param address: An address string that geolocation can understand. + ''' + + address = quote_plus(address, safe=',') + maps_address = 'http://maps.apple.com/?address=' + address + + webbrowser.open(maps_address) + + def _open_by_lat_long(self, latitude, longitude, **kwargs): + ''' + Open a coordinate span denoting a latitudinal delta and a + longitudinal delta (similar to MKCoordinateSpan) + + :param name: (optional), will set the name of the dropped pin + ''' + + name = kwargs.get("name", "Selected Location") + maps_address = 'http://maps.apple.com/?ll={},{}&q={}'.format( + latitude, longitude, name) + + webbrowser.open(maps_address) + + def _search(self, query, **kwargs): + ''' + :param query: A string that describes the search object (ex. 
"Pizza") + + :param latitude: (optional), narrow down query within area, + MUST BE USED WITH LONGITUDE + + :param longitude: (optional), narrow down query within area, + MUST BE USED WITH LATITUDE + ''' + + latitude = kwargs.get('latitude') + longitude = kwargs.get('longitude') + + query = quote_plus(query, safe=',') + maps_address = 'http://maps.apple.com/?q=' + query + + if latitude is not None and longitude is not None: + maps_address += '&sll={},{}'.format(latitude, longitude) + + webbrowser.open(maps_address) + + def _route(self, saddr, daddr, **kwargs): + ''' + :param saddr: can be given as 'address' or 'lat,long' + :param daddr: can be given as 'address' or 'lat,long' + ''' + saddr = quote_plus(saddr, safe=',') + daddr = quote_plus(daddr, safe=',') + + maps_address = 'http://maps.apple.com/?saddr={}&daddr={}'.format( + saddr, daddr) + webbrowser.open(maps_address) + + +def instance(): + ''' + Instance for facade proxy. + ''' + return iOSMaps() diff --git a/sbapp/plyer/platforms/ios/sms.py b/sbapp/plyer/platforms/ios/sms.py index ef3acaa..fc6b1b4 100644 --- a/sbapp/plyer/platforms/ios/sms.py +++ b/sbapp/plyer/platforms/ios/sms.py @@ -3,7 +3,7 @@ IOS Sms ---------- ''' -from plyer.facades import Sms +from sbapp.plyer.facades import Sms from pyobjus import autoclass, objc_str from pyobjus.dylib_manager import load_framework diff --git a/sbapp/plyer/platforms/ios/spatialorientation.py b/sbapp/plyer/platforms/ios/spatialorientation.py index d42d0fa..7c6d9cc 100644 --- a/sbapp/plyer/platforms/ios/spatialorientation.py +++ b/sbapp/plyer/platforms/ios/spatialorientation.py @@ -4,7 +4,7 @@ iOS Spatial Orientation ''' -from plyer.facades import SpatialOrientation +from sbapp.plyer.facades import SpatialOrientation from pyobjus import autoclass diff --git a/sbapp/plyer/platforms/ios/storagepath.py b/sbapp/plyer/platforms/ios/storagepath.py index cd8bbc3..c69a083 100644 --- a/sbapp/plyer/platforms/ios/storagepath.py +++ b/sbapp/plyer/platforms/ios/storagepath.py @@ -3,7 +3,7 @@ iOS Storage Path -------------------- ''' -from plyer.facades import StoragePath +from sbapp.plyer.facades import StoragePath from pyobjus import autoclass import os diff --git a/sbapp/plyer/platforms/ios/tts.py b/sbapp/plyer/platforms/ios/tts.py index 046769f..510399d 100644 --- a/sbapp/plyer/platforms/ios/tts.py +++ b/sbapp/plyer/platforms/ios/tts.py @@ -1,7 +1,7 @@ from pyobjus import autoclass, objc_str from pyobjus.dylib_manager import load_framework -from plyer.facades import TTS +from sbapp.plyer.facades import TTS load_framework('/System/Library/Frameworks/AVFoundation.framework') AVSpeechUtterance = autoclass('AVSpeechUtterance') @@ -23,7 +23,7 @@ class iOSTextToSpeech(TTS): def _speak(self, **kwargs): message = kwargs.get('message') - if(not self.voice): + if not self.voice: self._set_locale() utterance = \ diff --git a/sbapp/plyer/platforms/ios/uniqueid.py b/sbapp/plyer/platforms/ios/uniqueid.py index 4d30249..e9fa815 100644 --- a/sbapp/plyer/platforms/ios/uniqueid.py +++ b/sbapp/plyer/platforms/ios/uniqueid.py @@ -4,7 +4,7 @@ Module of iOS API for plyer.uniqueid. 
from pyobjus import autoclass from pyobjus.dylib_manager import load_framework -from plyer.facades import UniqueID +from sbapp.plyer.facades import UniqueID load_framework('/System/Library/Frameworks/UIKit.framework') UIDevice = autoclass('UIDevice') diff --git a/sbapp/plyer/platforms/ios/vibrator.py b/sbapp/plyer/platforms/ios/vibrator.py index 922182c..05c12c5 100644 --- a/sbapp/plyer/platforms/ios/vibrator.py +++ b/sbapp/plyer/platforms/ios/vibrator.py @@ -4,7 +4,7 @@ Install: Add AudioToolbox framework to your application. ''' import ctypes -from plyer.facades import Vibrator +from sbapp.plyer.facades import Vibrator class IosVibrator(Vibrator): diff --git a/sbapp/plyer/platforms/linux/accelerometer.py b/sbapp/plyer/platforms/linux/accelerometer.py index 7af283b..889de15 100644 --- a/sbapp/plyer/platforms/linux/accelerometer.py +++ b/sbapp/plyer/platforms/linux/accelerometer.py @@ -3,7 +3,7 @@ Linux accelerometer --------------------- ''' -from plyer.facades import Accelerometer +from sbapp.plyer.facades import Accelerometer import glob import re diff --git a/sbapp/plyer/platforms/linux/audio.py b/sbapp/plyer/platforms/linux/audio.py new file mode 100644 index 0000000..091e9e5 --- /dev/null +++ b/sbapp/plyer/platforms/linux/audio.py @@ -0,0 +1,139 @@ +import time +import threading +import RNS +import io +from sbapp.plyer.facades.audio import Audio +from ffpyplayer.player import MediaPlayer +from sbapp.pyogg import OpusFile, OpusBufferedEncoder, OggOpusWriter +import pyaudio + +class LinuxAudio(Audio): + + def __init__(self, file_path=None): + default_path = None + super().__init__(file_path or default_path) + + self._recorder = None + self._player = None + self._check_thread = None + self._finished_callback = None + self._loaded_path = None + self.sound = None + self.pa = None + self.is_playing = False + self.recorder = None + self.should_record = False + + def _check_playback(self): + run = True + while run and self.sound != None and not self.sound.get_pause(): + time.sleep(0.25) + if self.duration: + pts = self.sound.get_pts() + if pts > self.duration: + run = False + + self.is_playing = False + + if self._finished_callback and callable(self._finished_callback): + self._check_thread = None + self._finished_callback(self) + + def _record_job(self): + samples_per_second = self.default_rate; + bytes_per_sample = 2; frame_duration_ms = 20 + opus_buffered_encoder = OpusBufferedEncoder() + opus_buffered_encoder.set_application("voip") + opus_buffered_encoder.set_sampling_frequency(samples_per_second) + opus_buffered_encoder.set_channels(1) + opus_buffered_encoder.set_frame_size(frame_duration_ms) + ogg_opus_writer = OggOpusWriter(self._file_path, opus_buffered_encoder) + + frame_duration = frame_duration_ms/1000 + frame_size = int(frame_duration * samples_per_second) + bytes_per_frame = frame_size*bytes_per_sample + + read_bytes = 0 + pcm_buf = b"" + should_continue = True + while self.should_record and self.recorder: + samples_available = self.recorder.get_read_available() + bytes_available = samples_available*bytes_per_sample + if bytes_available > 0: + read_req = bytes_per_frame - len(pcm_buf) + read_n = min(bytes_available, read_req) + read_s = read_n//bytes_per_sample + rb = self.recorder.read(read_s); read_bytes += len(rb) + pcm_buf += rb + + if len(pcm_buf) == bytes_per_frame: + ogg_opus_writer.write(memoryview(bytearray(pcm_buf))) + # RNS.log("Wrote frame of "+str(len(pcm_buf))+", expected size "+str(bytes_per_frame)) + pcm_buf = b"" + + # Finish up anything left in buffer + 
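For reference, the buffering arithmetic used by this record loop, given the defaults set in `_start()` (48 kHz sample rate, 16-bit mono, 20 ms Opus frames), works out as follows. This is only an illustrative sketch, not part of the module:

```python
def opus_frame_geometry(sample_rate=48000, bytes_per_sample=2, frame_ms=20):
    """Illustrative helper: size of one Opus frame fed to OggOpusWriter."""
    frame_duration = frame_ms / 1000                 # 0.02 s
    frame_size = int(frame_duration * sample_rate)   # 960 samples
    bytes_per_frame = frame_size * bytes_per_sample  # 1920 bytes
    return frame_size, bytes_per_frame

# The loop above accumulates PyAudio reads into pcm_buf and flushes it to the
# OggOpusWriter each time exactly one 1920-byte frame has been collected.
print(opus_frame_geometry())  # (960, 1920)
```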
time.sleep(frame_duration) + samples_available = self.recorder.get_read_available() + bytes_available = samples_available*bytes_per_sample + if bytes_available > 0: + read_req = bytes_per_frame - len(pcm_buf) + read_n = min(bytes_available, read_req) + read_s = read_n//bytes_per_sample + rb = self.recorder.read(read_s); read_bytes += len(rb) + pcm_buf += rb + + if len(pcm_buf) == bytes_per_frame: + ogg_opus_writer.write(memoryview(bytearray(pcm_buf))) + # RNS.log("Wrote frame of "+str(len(pcm_buf))+", expected size "+str(bytes_per_frame)) + pcm_buf = b"" + + ogg_opus_writer.close() + if self.recorder: + self.recorder.close() + + def _start(self): + self.should_record = True + if self.pa == None: + self.pa = pyaudio.PyAudio() + self.default_input_device = self.pa.get_default_input_device_info() + self.default_rate = 48000 + # self.default_rate = int(self.default_input_device["defaultSampleRate"]) + if self.recorder: + self.recorder.close() + self.recorder = None + self.recorder = self.pa.open(self.default_rate, 1, pyaudio.paInt16, input=True) + threading.Thread(target=self._record_job, daemon=True).start() + + def _stop(self): + if self.should_record == True: + self.should_record = False + + elif self.sound != None: + self.sound.set_pause(True) + self.sound.seek(0, relative=False) + self.is_playing = False + + def _play(self): + self.sound = MediaPlayer(self._file_path) + self.metadata = self.sound.get_metadata() + self.duration = self.metadata["duration"] + if self.duration == None: + time.sleep(0.15) + self.metadata = self.sound.get_metadata() + self.duration = self.metadata["duration"] + + self._loaded_path = self._file_path + self.is_playing = True + + self._check_thread = threading.Thread(target=self._check_playback, daemon=True) + self._check_thread.start() + + def reload(self): + self._loaded_path = None + + def playing(self): + return self.is_playing + + +def instance(): + return LinuxAudio() diff --git a/sbapp/plyer/platforms/linux/battery.py b/sbapp/plyer/platforms/linux/battery.py index b0c4691..9fc9f9f 100644 --- a/sbapp/plyer/platforms/linux/battery.py +++ b/sbapp/plyer/platforms/linux/battery.py @@ -2,13 +2,12 @@ Module of Linux API for plyer.battery. ''' -import os from math import floor from os import environ from os.path import exists, join from subprocess import Popen, PIPE -from plyer.facades import Battery -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Battery +from sbapp.plyer.utils import whereis_exe class LinuxBattery(Battery): @@ -20,10 +19,10 @@ class LinuxBattery(Battery): def _get_state(self): status = {"isCharging": None, "percentage": None} - kernel_bat_path = join('/sys', 'class', 'power_supply', self.node_name) + kernel_bat_path = join('/sys', 'class', 'power_supply', 'BAT0') uevent = join(kernel_bat_path, 'uevent') - with open(uevent, "rb") as fle: + with open(uevent) as fle: lines = [ line.decode('utf-8').strip() for line in fle.readlines() @@ -34,34 +33,70 @@ class LinuxBattery(Battery): } is_charging = output['POWER_SUPPLY_STATUS'] == 'Charging' - charge_percent = float(output['POWER_SUPPLY_CAPACITY']) + total = float(output['POWER_SUPPLY_CHARGE_FULL']) + now = float(output['POWER_SUPPLY_CHARGE_NOW']) - status['percentage'] = charge_percent + capacity = floor(now / total * 100) + + status['percentage'] = capacity status['isCharging'] = is_charging return status +class UPowerBattery(Battery): + ''' + Implementation of UPower battery API. 
+ ''' + + def _get_state(self): + # if no LANG specified, return empty string + old_lang = environ.get('LANG', '') + environ['LANG'] = 'C' + status = {"isCharging": None, "percentage": None} + + # We are supporting only one battery now + # this will fail if there is no object with such path, + # however it's safer than 'upower -d' which provides + # multiple unrelated 'state' and 'percentage' keywords + dev = "/org/freedesktop/UPower/devices/battery_BAT0" + upower_process = Popen( + ["upower", "--show-info", dev], + stdout=PIPE + ) + output = upower_process.communicate()[0].decode() + environ['LANG'] = old_lang + if not output: + return status + state = percentage = None + + for line in output.splitlines(): + if 'state' in line: + state = line.rpartition(':')[-1].strip() + + if 'percentage' in line: + percentage = line.rpartition(':')[-1].strip()[:-1] + + # switching decimal comma to dot + # (different LC_NUMERIC locale) + percentage = float( + percentage.replace(',', '.') + ) + + if state: + status['isCharging'] = state == "charging" + status['percentage'] = percentage + return status + + def instance(): ''' Instance for facade proxy. ''' import sys - # if whereis_exe('upower'): - # return UPowerBattery() - # sys.stderr.write("upower not found.") - - node_exists = False - bn = 0 - node_name = None - for bi in range(0,10): - path = join('/sys', 'class', 'power_supply', 'BAT'+str(bi)) - if os.path.isdir(path): - node_name = "BAT"+str(bi) - break - - if node_name: - b = LinuxBattery() - b.node_name = node_name - return b + if whereis_exe('upower'): + return UPowerBattery() + sys.stderr.write("upower not found.") + if exists(join('/sys', 'class', 'power_supply', 'BAT0')): + return LinuxBattery() return Battery() diff --git a/sbapp/plyer/platforms/linux/brightness.py b/sbapp/plyer/platforms/linux/brightness.py index 0de1698..eb30536 100755 --- a/sbapp/plyer/platforms/linux/brightness.py +++ b/sbapp/plyer/platforms/linux/brightness.py @@ -4,7 +4,7 @@ Linux Brightness ''' -from plyer.facades import Brightness +from sbapp.plyer.facades import Brightness import subprocess import os diff --git a/sbapp/plyer/platforms/linux/cpu.py b/sbapp/plyer/platforms/linux/cpu.py index 049a2d4..66a532f 100644 --- a/sbapp/plyer/platforms/linux/cpu.py +++ b/sbapp/plyer/platforms/linux/cpu.py @@ -5,8 +5,8 @@ Module of Linux API for plyer.cpu. from os.path import join from os import environ, listdir from subprocess import Popen, PIPE -from plyer.facades import CPU -from plyer.utils import whereis_exe +from sbapp.plyer.facades import CPU +from sbapp.plyer.utils import whereis_exe class LinuxCPU(CPU): diff --git a/sbapp/plyer/platforms/linux/devicename.py b/sbapp/plyer/platforms/linux/devicename.py index 8941379..cbd9008 100644 --- a/sbapp/plyer/platforms/linux/devicename.py +++ b/sbapp/plyer/platforms/linux/devicename.py @@ -3,7 +3,7 @@ Module of Linux API for plyer.devicename. 
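The battery changes above switch `instance()` to prefer `upower` and fall back to the sysfs `BAT0` node, with `LinuxBattery` now deriving the percentage from `POWER_SUPPLY_CHARGE_NOW` and `POWER_SUPPLY_CHARGE_FULL`. A minimal standalone sketch of that uevent parsing, using the same field names as the diff (the helper itself is hypothetical):

```python
from math import floor

def parse_uevent(text):
    """Parse a /sys/class/power_supply/BAT0/uevent dump into a status dict."""
    fields = dict(line.split("=", 1) for line in text.splitlines() if "=" in line)
    full = float(fields["POWER_SUPPLY_CHARGE_FULL"])
    now = float(fields["POWER_SUPPLY_CHARGE_NOW"])
    return {
        "isCharging": fields["POWER_SUPPLY_STATUS"] == "Charging",
        "percentage": floor(now / full * 100),
    }

sample = (
    "POWER_SUPPLY_STATUS=Discharging\n"
    "POWER_SUPPLY_CHARGE_FULL=4764000\n"
    "POWER_SUPPLY_CHARGE_NOW=4239960\n"
)
print(parse_uevent(sample))  # {'isCharging': False, 'percentage': 89}
```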
''' import socket -from plyer.facades import DeviceName +from sbapp.plyer.facades import DeviceName class LinuxDeviceName(DeviceName): diff --git a/sbapp/plyer/platforms/linux/email.py b/sbapp/plyer/platforms/linux/email.py index 64cbb6d..90efce9 100644 --- a/sbapp/plyer/platforms/linux/email.py +++ b/sbapp/plyer/platforms/linux/email.py @@ -7,8 +7,8 @@ try: from urllib.parse import quote except ImportError: from urllib import quote -from plyer.facades import Email -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Email +from sbapp.plyer.utils import whereis_exe class LinuxEmail(Email): diff --git a/sbapp/plyer/platforms/linux/filechooser.py b/sbapp/plyer/platforms/linux/filechooser.py index cfdb1ae..a8e2ef2 100644 --- a/sbapp/plyer/platforms/linux/filechooser.py +++ b/sbapp/plyer/platforms/linux/filechooser.py @@ -3,7 +3,7 @@ Linux file chooser ------------------ ''' -from plyer.facades import FileChooser +from sbapp.plyer.facades import FileChooser from distutils.spawn import find_executable as which import os import subprocess as sp @@ -122,7 +122,7 @@ class ZenityFileChooser(SubprocessFileChooser): if self.icon: cmdline += ["--window-icon", self.icon] for f in self.filters: - if type(f) == str: + if isinstance(f, str): cmdline += ["--file-filter", f] else: cmdline += [ @@ -150,7 +150,7 @@ class KDialogFileChooser(SubprocessFileChooser): filt = [] for f in self.filters: - if type(f) == str: + if isinstance(f, str): filt += [f] else: filt += list(f[1:]) @@ -195,7 +195,7 @@ class YADFileChooser(SubprocessFileChooser): def _gen_cmdline(self): cmdline = [ which(self.executable), - "--file-selection", + "--file", "--confirm-overwrite", "--geometry", "800x600+150+150" @@ -215,7 +215,7 @@ class YADFileChooser(SubprocessFileChooser): if self.icon: cmdline += ["--window-icon", self.icon] for f in self.filters: - if type(f) == str: + if isinstance(f, str): cmdline += ["--file-filter", f] else: cmdline += [ diff --git a/sbapp/plyer/platforms/linux/keystore.py b/sbapp/plyer/platforms/linux/keystore.py index 105ebd3..b43be70 100644 --- a/sbapp/plyer/platforms/linux/keystore.py +++ b/sbapp/plyer/platforms/linux/keystore.py @@ -3,7 +3,7 @@ try: except ImportError: raise NotImplementedError() -from plyer.facades import Keystore +from sbapp.plyer.facades import Keystore class LinuxKeystore(Keystore): diff --git a/sbapp/plyer/platforms/linux/notification.py b/sbapp/plyer/platforms/linux/notification.py index 95a6472..552519e 100644 --- a/sbapp/plyer/platforms/linux/notification.py +++ b/sbapp/plyer/platforms/linux/notification.py @@ -4,8 +4,8 @@ Module of Linux API for plyer.notification. 
import warnings import subprocess -from plyer.facades import Notification -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Notification +from sbapp.plyer.utils import whereis_exe import os @@ -63,7 +63,7 @@ class NotifyDbus(Notification): def _notify(self, **kwargs): summary = kwargs.get('title', "title") body = kwargs.get('message', "body") - app_name = kwargs.get('app_name', '') + app_name = "Sideband" app_icon = kwargs.get('app_icon', '') timeout = kwargs.get('timeout', 10) actions = kwargs.get('actions', []) diff --git a/sbapp/plyer/platforms/linux/orientation.py b/sbapp/plyer/platforms/linux/orientation.py index e60fa42..c190128 100644 --- a/sbapp/plyer/platforms/linux/orientation.py +++ b/sbapp/plyer/platforms/linux/orientation.py @@ -1,5 +1,5 @@ import subprocess as sb -from plyer.facades import Orientation +from sbapp.plyer.facades import Orientation class LinuxOrientation(Orientation): diff --git a/sbapp/plyer/platforms/linux/processors.py b/sbapp/plyer/platforms/linux/processors.py index 74bb73a..332ec83 100644 --- a/sbapp/plyer/platforms/linux/processors.py +++ b/sbapp/plyer/platforms/linux/processors.py @@ -1,6 +1,6 @@ from subprocess import Popen, PIPE -from plyer.facades import Processors -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Processors +from sbapp.plyer.utils import whereis_exe from os import environ diff --git a/sbapp/plyer/platforms/linux/screenshot.py b/sbapp/plyer/platforms/linux/screenshot.py index 00eeb5c..5dc2ccf 100644 --- a/sbapp/plyer/platforms/linux/screenshot.py +++ b/sbapp/plyer/platforms/linux/screenshot.py @@ -1,8 +1,8 @@ import subprocess from os.path import join -from plyer.facades import Screenshot -from plyer.utils import whereis_exe -from plyer.platforms.linux.storagepath import LinuxStoragePath +from sbapp.plyer.facades import Screenshot +from sbapp.plyer.utils import whereis_exe +from sbapp.plyer.platforms.linux.storagepath import LinuxStoragePath class LinuxScreenshot(Screenshot): diff --git a/sbapp/plyer/platforms/linux/storagepath.py b/sbapp/plyer/platforms/linux/storagepath.py index 736674a..b32a5fe 100755 --- a/sbapp/plyer/platforms/linux/storagepath.py +++ b/sbapp/plyer/platforms/linux/storagepath.py @@ -3,7 +3,7 @@ Linux Storage Path -------------------- ''' -from plyer.facades import StoragePath +from sbapp.plyer.facades import StoragePath from os.path import expanduser, dirname, abspath, join, exists # Default paths for each name diff --git a/sbapp/plyer/platforms/linux/tts.py b/sbapp/plyer/platforms/linux/tts.py index 3932772..3b8b634 100644 --- a/sbapp/plyer/platforms/linux/tts.py +++ b/sbapp/plyer/platforms/linux/tts.py @@ -1,6 +1,6 @@ import subprocess -from plyer.facades import TTS -from plyer.utils import whereis_exe +from sbapp.plyer.facades import TTS +from sbapp.plyer.utils import whereis_exe class EspeakTextToSpeech(TTS): diff --git a/sbapp/plyer/platforms/linux/uniqueid.py b/sbapp/plyer/platforms/linux/uniqueid.py index 44926b1..30c1112 100644 --- a/sbapp/plyer/platforms/linux/uniqueid.py +++ b/sbapp/plyer/platforms/linux/uniqueid.py @@ -4,8 +4,8 @@ Module of Linux API for plyer.uniqueid. 
from os import environ from subprocess import Popen, PIPE -from plyer.facades import UniqueID -from plyer.utils import whereis_exe +from sbapp.plyer.facades import UniqueID +from sbapp.plyer.utils import whereis_exe class LinuxUniqueID(UniqueID): diff --git a/sbapp/plyer/platforms/linux/wifi.py b/sbapp/plyer/platforms/linux/wifi.py index 73f09b6..144f58a 100644 --- a/sbapp/plyer/platforms/linux/wifi.py +++ b/sbapp/plyer/platforms/linux/wifi.py @@ -6,8 +6,8 @@ ''' from subprocess import Popen, PIPE, call -from plyer.facades import Wifi -from plyer.utils import whereis_exe, deprecated +from sbapp.plyer.facades import Wifi +from sbapp.plyer.utils import whereis_exe, deprecated try: import wifi diff --git a/sbapp/plyer/platforms/macosx/accelerometer.py b/sbapp/plyer/platforms/macosx/accelerometer.py index 91e72c3..5b52578 100644 --- a/sbapp/plyer/platforms/macosx/accelerometer.py +++ b/sbapp/plyer/platforms/macosx/accelerometer.py @@ -3,8 +3,8 @@ MacOSX accelerometer --------------------- ''' -from plyer.facades import Accelerometer -from plyer.platforms.macosx.libs import osx_motion_sensor +from sbapp.plyer.facades import Accelerometer +from sbapp.plyer.platforms.macosx.libs import osx_motion_sensor class OSXAccelerometer(Accelerometer): diff --git a/sbapp/plyer/platforms/macosx/audio.py b/sbapp/plyer/platforms/macosx/audio.py index 3ab9ce1..1f2069e 100644 --- a/sbapp/plyer/platforms/macosx/audio.py +++ b/sbapp/plyer/platforms/macosx/audio.py @@ -3,8 +3,10 @@ from os.path import join from pyobjus import autoclass from pyobjus.dylib_manager import INCLUDE, load_framework -from plyer.facades import Audio -from plyer.platforms.macosx.storagepath import OSXStoragePath +from sbapp.plyer.facades import Audio +from sbapp.plyer.platforms.macosx.storagepath import OSXStoragePath + +import threading load_framework(INCLUDE.Foundation) load_framework(INCLUDE.AVFoundation) @@ -19,16 +21,31 @@ NSError = autoclass('NSError').alloc() class OSXAudio(Audio): def __init__(self, file_path=None): - default_path = join( - OSXStoragePath().get_music_dir(), - 'audio.wav' - ) + default_path = None super().__init__(file_path or default_path) self._recorder = None self._player = None self._current_file = None + self._check_thread = None + self._finished_callback = None + self._loaded_path = None + self.is_playing = False + self.sound = None + self.pa = None + self.is_playing = False + self.recorder = None + self.should_record = False + + def _check_playback(self): + while self._player and self._player.isPlaying: + time.sleep(0.25) + + if self._finished_callback and callable(self._finished_callback): + self._check_thread = None + self._finished_callback(self) + def _start(self): # Conversion of Python file path string to Objective-C NSString file_path_NSString = NSString.alloc() @@ -44,7 +61,7 @@ class OSXAudio(Audio): # Internal audio file format specification af = AVAudioFormat.alloc() af = af.initWithCommonFormat_sampleRate_channels_interleaved_( - 1, 44100.0, 2, True + 1, 44100.0, 1, True ) # Audio recorder instance initialization with specified file NSURL @@ -73,6 +90,18 @@ class OSXAudio(Audio): self._player = None def _play(self): + # Conversion of Python file path string to Objective-C NSString + file_path_NSString = NSString.alloc() + file_path_NSString = file_path_NSString.initWithUTF8String_( + self._file_path + ) + + # Definition of Objective-C NSURL object for the output record file + # specified by NSString file path + file_NSURL = NSURL.alloc() + file_NSURL = file_NSURL.initWithString_(file_path_NSString) 
+ self._current_file = file_NSURL + # Audio player instance initialization with the file NSURL # of the last recorded audio file self._player = AVAudioPlayer.alloc() @@ -85,6 +114,15 @@ class OSXAudio(Audio): self._player.play() + self._check_thread = threading.Thread(target=self._check_playback, daemon=True) + self._check_thread.start() + + def reload(self): + self._loaded_path = None + + def playing(self): + return self.is_playing + def instance(): return OSXAudio() diff --git a/sbapp/plyer/platforms/macosx/battery.py b/sbapp/plyer/platforms/macosx/battery.py index 57965d3..ec42486 100644 --- a/sbapp/plyer/platforms/macosx/battery.py +++ b/sbapp/plyer/platforms/macosx/battery.py @@ -4,8 +4,8 @@ Module of MacOS API for plyer.battery. from os import environ from subprocess import Popen, PIPE -from plyer.facades import Battery -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Battery +from sbapp.plyer.utils import whereis_exe class OSXBattery(Battery): diff --git a/sbapp/plyer/platforms/macosx/bluetooth.py b/sbapp/plyer/platforms/macosx/bluetooth.py index 2575817..7c2ec22 100644 --- a/sbapp/plyer/platforms/macosx/bluetooth.py +++ b/sbapp/plyer/platforms/macosx/bluetooth.py @@ -3,8 +3,8 @@ Module of MacOS API for plyer.bluetooth. ''' from subprocess import Popen, PIPE -from plyer.facades import Bluetooth -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Bluetooth +from sbapp.plyer.utils import whereis_exe from os import environ diff --git a/sbapp/plyer/platforms/macosx/cpu.py b/sbapp/plyer/platforms/macosx/cpu.py index 7b7da7f..26eeebb 100644 --- a/sbapp/plyer/platforms/macosx/cpu.py +++ b/sbapp/plyer/platforms/macosx/cpu.py @@ -3,8 +3,8 @@ Module of MacOS API for plyer.cpu. ''' from subprocess import Popen, PIPE -from plyer.facades import CPU -from plyer.utils import whereis_exe +from sbapp.plyer.facades import CPU +from sbapp.plyer.utils import whereis_exe class OSXCPU(CPU): diff --git a/sbapp/plyer/platforms/macosx/devicename.py b/sbapp/plyer/platforms/macosx/devicename.py index 6652425..e100246 100644 --- a/sbapp/plyer/platforms/macosx/devicename.py +++ b/sbapp/plyer/platforms/macosx/devicename.py @@ -3,7 +3,7 @@ Module of MacOSX API for plyer.devicename. 
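Both the Linux and macOS audio backends in this diff add the same playback-watcher pattern: a daemon thread polls the underlying player and fires the optional `_finished_callback` once playback stops. A platform-neutral sketch of that pattern follows; the `player` object here is a stand-in for AVAudioPlayer or MediaPlayer and is purely illustrative:

```python
import threading
import time

def watch_playback(player, finished_callback=None, poll_interval=0.25):
    """Poll `player.is_playing()` and invoke the callback when playback ends."""
    def _watch():
        while player.is_playing():
            time.sleep(poll_interval)
        if callable(finished_callback):
            finished_callback(player)

    # Daemon thread, mirroring the threads started from _play() in the diff.
    threading.Thread(target=_watch, daemon=True).start()
```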
''' import socket -from plyer.facades import DeviceName +from sbapp.plyer.facades import DeviceName class OSXDeviceName(DeviceName): diff --git a/sbapp/plyer/platforms/macosx/email.py b/sbapp/plyer/platforms/macosx/email.py index 49c5d4d..42295ad 100644 --- a/sbapp/plyer/platforms/macosx/email.py +++ b/sbapp/plyer/platforms/macosx/email.py @@ -9,8 +9,8 @@ try: except ImportError: from urllib import quote -from plyer.facades import Email -from plyer.utils import whereis_exe +from sbapp.plyer.facades import Email +from sbapp.plyer.utils import whereis_exe class MacOSXEmail(Email): diff --git a/sbapp/plyer/platforms/macosx/filechooser.py b/sbapp/plyer/platforms/macosx/filechooser.py index 85fb91c..1d48062 100644 --- a/sbapp/plyer/platforms/macosx/filechooser.py +++ b/sbapp/plyer/platforms/macosx/filechooser.py @@ -3,7 +3,7 @@ Mac OS X file chooser --------------------- ''' -from plyer.facades import FileChooser +from sbapp.plyer.facades import FileChooser from pyobjus import autoclass, objc_arr, objc_str from pyobjus.dylib_manager import load_framework, INCLUDE @@ -80,7 +80,7 @@ class MacFileChooser: if self.filters: filthies = [] for f in self.filters: - if type(f) == str: + if isinstance(f, str): f = (None, f) for s in f[1:]: if not self.use_extensions: diff --git a/sbapp/plyer/platforms/macosx/keystore.py b/sbapp/plyer/platforms/macosx/keystore.py index 11e60e8..ec00cff 100644 --- a/sbapp/plyer/platforms/macosx/keystore.py +++ b/sbapp/plyer/platforms/macosx/keystore.py @@ -3,7 +3,7 @@ try: except ImportError: raise NotImplementedError() -from plyer.facades import Keystore +from sbapp.plyer.facades import Keystore class OSXKeystore(Keystore): diff --git a/sbapp/plyer/platforms/macosx/libs/osx_motion_sensor.py b/sbapp/plyer/platforms/macosx/libs/osx_motion_sensor.py index d49df43..a5dc76a 100644 --- a/sbapp/plyer/platforms/macosx/libs/osx_motion_sensor.py +++ b/sbapp/plyer/platforms/macosx/libs/osx_motion_sensor.py @@ -86,7 +86,7 @@ def read_sms(): inStructure = data_structure() outStructure = data_structure() - if(is_os_64bit() or hasattr(IOKit, 'IOConnectCallStructMethod')): + if is_os_64bit() or hasattr(IOKit, 'IOConnectCallStructMethod'): structureInSize = IOItemCount(sizeof(data_structure)) structureOutSize = c_size_t(sizeof(data_structure)) @@ -120,7 +120,7 @@ def get_coord(): ret, data = read_sms() if (ret > 0): - if(data.x): + if data.x: return (data.x, data.y, data.z) else: return (None, None, None) diff --git a/sbapp/plyer/platforms/macosx/maps.py b/sbapp/plyer/platforms/macosx/maps.py new file mode 100644 index 0000000..7a6d998 --- /dev/null +++ b/sbapp/plyer/platforms/macosx/maps.py @@ -0,0 +1,90 @@ +''' +Module of macOS API for plyer.maps. +''' + +from subprocess import Popen, PIPE +from sbapp.plyer.facades import Maps +from urllib.parse import quote_plus + + +class MacOSMaps(Maps): + ''' + Implementation of MacOS Maps API. + ''' + + def _open_by_address(self, address, **kwargs): + ''' + :param address: An address string that geolocation can understand. 
+ ''' + + address = quote_plus(address, safe=',') + maps_address = 'http://maps.apple.com/?address=' + address + + process = Popen( + ['open', '-a', 'Maps', maps_address], + stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate() + + def _open_by_lat_long(self, latitude, longitude, **kwargs): + ''' + Open a coordinate span denoting a latitudinal delta and a + longitudinal delta (similar to MKCoordinateSpan) + + :param name: (optional), will set the name of the dropped pin + ''' + + name = kwargs.get("name", "Selected Location") + maps_address = 'http://maps.apple.com/?ll={},{}&q={}'.format( + latitude, longitude, name) + + process = Popen( + ['open', '-a', 'Maps', maps_address], + stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate() + + def _search(self, query, **kwargs): + ''' + :param query: A string that describes the search object (ex. "Pizza") + + :param latitude: (optional), narrow down query within area, + MUST BE USED WITH LONGITUDE + + :param longitude: (optional), narrow down query within area, + MUST BE USED WITH LATITUDE + ''' + + latitude = kwargs.get('latitude') + longitude = kwargs.get('longitude') + + query = quote_plus(query, safe=',') + maps_address = 'http://maps.apple.com/?q=' + query + + if latitude is not None and longitude is not None: + maps_address += '&sll={},{}'.format(latitude, longitude) + + process = Popen( + ['open', '-a', 'Maps', maps_address], + stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate() + + def _route(self, saddr, daddr, **kwargs): + ''' + :param saddr: can be given as 'address' or 'lat,long' + :param daddr: can be given as 'address' or 'lat,long' + ''' + saddr = quote_plus(saddr, safe=',') + daddr = quote_plus(daddr, safe=',') + + maps_address = 'http://maps.apple.com/?saddr={}&daddr={}'.format( + saddr, daddr) + process = Popen( + ['open', '-a', 'Maps', maps_address], + stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate() + + +def instance(): + ''' + Instance for facade proxy. + ''' + return MacOSMaps() diff --git a/sbapp/plyer/platforms/macosx/notification.py b/sbapp/plyer/platforms/macosx/notification.py index ed78ab1..6a94bbd 100644 --- a/sbapp/plyer/platforms/macosx/notification.py +++ b/sbapp/plyer/platforms/macosx/notification.py @@ -2,7 +2,7 @@ Module of MacOS API for plyer.notification. 
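Both the new `ios/maps.py` and `macosx/maps.py` modules above reduce every operation to an `http://maps.apple.com/` query URL; iOS hands it to `webbrowser.open()`, while macOS launches it with `open -a Maps`. A condensed sketch of that shared logic, independent of pyobjus (the helper names are illustrative, not part of either module):

```python
from subprocess import Popen, PIPE
from urllib.parse import quote_plus

BASE = "http://maps.apple.com/"

def address_url(address):
    # Same encoding as the diff: quote everything except commas.
    return BASE + "?address=" + quote_plus(address, safe=",")

def route_url(saddr, daddr):
    # Source and destination may each be an address or a "lat,long" pair.
    return BASE + "?saddr={}&daddr={}".format(
        quote_plus(saddr, safe=","), quote_plus(daddr, safe=","))

def open_on_macos(url):
    # Launch Maps.app the way MacOSMaps does (macOS only).
    Popen(["open", "-a", "Maps", url], stdout=PIPE, stderr=PIPE).communicate()

print(address_url("1 Infinite Loop, Cupertino"))
print(route_url("Aarhus", "Copenhagen"))
# open_on_macos(address_url("1 Infinite Loop, Cupertino"))
```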
''' -from plyer.facades import Notification +from sbapp.plyer.facades import Notification from pyobjus import ( autoclass, protocol, objc_str, ObjcBOOL @@ -26,7 +26,7 @@ class OSXNotification(Notification): def _notify(self, **kwargs): title = kwargs.get('title', '') message = kwargs.get('message', '') - app_name = kwargs.get('app_name', '') + app_name = "Sideband" # app_icon, timeout, ticker are not supported (yet) notification = NSUserNotification.alloc().init() diff --git a/sbapp/plyer/platforms/macosx/screenshot.py b/sbapp/plyer/platforms/macosx/screenshot.py index c76766a..16a35c4 100644 --- a/sbapp/plyer/platforms/macosx/screenshot.py +++ b/sbapp/plyer/platforms/macosx/screenshot.py @@ -1,8 +1,8 @@ import subprocess from os.path import join -from plyer.facades import Screenshot -from plyer.utils import whereis_exe -from plyer.platforms.macosx.storagepath import OSXStoragePath +from sbapp.plyer.facades import Screenshot +from sbapp.plyer.utils import whereis_exe +from sbapp.plyer.platforms.macosx.storagepath import OSXStoragePath class OSXScreenshot(Screenshot): diff --git a/sbapp/plyer/platforms/macosx/sms.py b/sbapp/plyer/platforms/macosx/sms.py new file mode 100644 index 0000000..8634443 --- /dev/null +++ b/sbapp/plyer/platforms/macosx/sms.py @@ -0,0 +1,42 @@ +from subprocess import Popen, PIPE +from sbapp.plyer.facades import Sms as SMS +from sbapp.plyer.utils import whereis_exe + + +class MacOSSMS(SMS): + ''' + Implementation of macOS' Messages API + ''' + + def _send(self, **kwargs): + ''' + Will send `message` to `recipient` via Messages app + + By default, if `mode` is not explicitly set, `iMessage` is used. + In order to use `SMS` mode, a valid carrier-activated device must + be connected and configured. + ''' + + recipient = kwargs.get('recipient') + message = kwargs.get('message') + mode = kwargs.get('mode') # Supported modes: iMessage (default), SMS + if not mode: + mode = 'iMessage' + + APPLESCRIPT = f"""tell application "Messages" + set targetService to 1st account whose service type = {mode} + set targetBuddy to participant "{recipient}" of targetService + send "{message}" to targetBuddy +end tell""" + + osascript_process = Popen( + ['osascript', '-e', APPLESCRIPT], stdout=PIPE, stderr=PIPE) + stdout, stderr = osascript_process.communicate() + + +def instance(): + import sys + if whereis_exe('osascript'): + return MacOSSMS() + sys.stderr.write('osascript not found.') + return SMS() diff --git a/sbapp/plyer/platforms/macosx/storagepath.py b/sbapp/plyer/platforms/macosx/storagepath.py index 027b04e..79973a4 100644 --- a/sbapp/plyer/platforms/macosx/storagepath.py +++ b/sbapp/plyer/platforms/macosx/storagepath.py @@ -3,7 +3,7 @@ MacOS X Storage Path -------------------- ''' -from plyer.facades import StoragePath +from sbapp.plyer.facades import StoragePath from pyobjus import autoclass NSFileManager = autoclass('NSFileManager') diff --git a/sbapp/plyer/platforms/macosx/tts.py b/sbapp/plyer/platforms/macosx/tts.py index 755e820..464de56 100644 --- a/sbapp/plyer/platforms/macosx/tts.py +++ b/sbapp/plyer/platforms/macosx/tts.py @@ -1,6 +1,6 @@ import subprocess -from plyer.facades import TTS -from plyer.utils import whereis_exe +from sbapp.plyer.facades import TTS +from sbapp.plyer.utils import whereis_exe class NativeSayTextToSpeech(TTS): diff --git a/sbapp/plyer/platforms/macosx/uniqueid.py b/sbapp/plyer/platforms/macosx/uniqueid.py index dc84153..464e723 100644 --- a/sbapp/plyer/platforms/macosx/uniqueid.py +++ b/sbapp/plyer/platforms/macosx/uniqueid.py @@ -4,8 +4,8 @@ 
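The new macOS SMS backend above drives Messages.app through AppleScript executed with `osascript`. A trimmed-down sketch of that call, assuming a macOS machine with Messages configured (the wrapper function and the example number are placeholders, not part of the module):

```python
from subprocess import Popen, PIPE

def send_via_messages(recipient, message, service="iMessage"):
    # Build the same AppleScript block the diff passes to `osascript -e`.
    script = f'''tell application "Messages"
    set targetService to 1st account whose service type = {service}
    set targetBuddy to participant "{recipient}" of targetService
    send "{message}" to targetBuddy
end tell'''
    proc = Popen(["osascript", "-e", script], stdout=PIPE, stderr=PIPE)
    return proc.communicate()

# Example (macOS only):
# send_via_messages("+15551234567", "Hello from Sideband")
```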
Module of MacOS API for plyer.uniqueid. from os import environ from subprocess import Popen, PIPE -from plyer.facades import UniqueID -from plyer.utils import whereis_exe +from sbapp.plyer.facades import UniqueID +from sbapp.plyer.utils import whereis_exe class OSXUniqueID(UniqueID): diff --git a/sbapp/plyer/platforms/macosx/wifi.py b/sbapp/plyer/platforms/macosx/wifi.py index 32c02ab..61da385 100644 --- a/sbapp/plyer/platforms/macosx/wifi.py +++ b/sbapp/plyer/platforms/macosx/wifi.py @@ -1,7 +1,7 @@ from pyobjus import autoclass from pyobjus.dylib_manager import load_framework, INCLUDE -from plyer.facades import Wifi +from sbapp.plyer.facades import Wifi load_framework(INCLUDE.Foundation) load_framework(INCLUDE.CoreWLAN) diff --git a/sbapp/plyer/platforms/win/audio.py b/sbapp/plyer/platforms/win/audio.py index c9d8f35..c74d97d 100644 --- a/sbapp/plyer/platforms/win/audio.py +++ b/sbapp/plyer/platforms/win/audio.py @@ -14,8 +14,8 @@ from ctypes import ( ) from ctypes.wintypes import DWORD, UINT -from plyer.facades import Audio -from plyer.platforms.win.storagepath import WinStoragePath +from sbapp.plyer.facades import Audio +from sbapp.plyer.platforms.win.storagepath import WinStoragePath # DWORD_PTR i.e. ULONG_PTR, 32/64bit ULONG_PTR = c_ulonglong if sizeof(c_void_p) == 8 else c_ulong @@ -308,6 +308,15 @@ class WinAudio(Audio): self._recorder = None self._player = None self._current_file = None + self._check_thread = None + self._finished_callback = None + self._loaded_path = None + self.is_playing = False + self.sound = None + self.pa = None + self.is_playing = False + self.recorder = None + self.should_record = False def _start(self): ''' @@ -390,6 +399,12 @@ class WinAudio(Audio): self._player = WinPlayer(device=open_params.wDeviceID) self._player.play() + def reload(self): + self._loaded_path = None + + def playing(self): + return self.is_playing + def instance(): ''' diff --git a/sbapp/plyer/platforms/win/battery.py b/sbapp/plyer/platforms/win/battery.py index 861d904..af899ca 100644 --- a/sbapp/plyer/platforms/win/battery.py +++ b/sbapp/plyer/platforms/win/battery.py @@ -2,8 +2,8 @@ Module of Windows API for plyer.battery. ''' -from plyer.platforms.win.libs.batterystatus import battery_status -from plyer.facades import Battery +from sbapp.plyer.platforms.win.libs.batterystatus import battery_status +from sbapp.plyer.facades import Battery from ctypes.wintypes import BYTE diff --git a/sbapp/plyer/platforms/win/cpu.py b/sbapp/plyer/platforms/win/cpu.py index d1262dc..9b65549 100644 --- a/sbapp/plyer/platforms/win/cpu.py +++ b/sbapp/plyer/platforms/win/cpu.py @@ -11,7 +11,7 @@ from ctypes.wintypes import ( BYTE, DWORD, WORD ) -from plyer.facades import CPU +from sbapp.plyer.facades import CPU KERNEL = windll.kernel32 diff --git a/sbapp/plyer/platforms/win/devicename.py b/sbapp/plyer/platforms/win/devicename.py index d35d76b..d631ac4 100644 --- a/sbapp/plyer/platforms/win/devicename.py +++ b/sbapp/plyer/platforms/win/devicename.py @@ -3,7 +3,7 @@ Module of Win API for plyer.devicename. 
''' import socket -from plyer.facades import DeviceName +from sbapp.plyer.facades import DeviceName class WinDeviceName(DeviceName): diff --git a/sbapp/plyer/platforms/win/email.py b/sbapp/plyer/platforms/win/email.py index 4c0f9b7..3c43cde 100644 --- a/sbapp/plyer/platforms/win/email.py +++ b/sbapp/plyer/platforms/win/email.py @@ -7,7 +7,7 @@ try: from urllib.parse import quote except ImportError: from urllib import quote -from plyer.facades import Email +from sbapp.plyer.facades import Email class WindowsEmail(Email): diff --git a/sbapp/plyer/platforms/win/filechooser.py b/sbapp/plyer/platforms/win/filechooser.py index d61fdc1..9932d9b 100644 --- a/sbapp/plyer/platforms/win/filechooser.py +++ b/sbapp/plyer/platforms/win/filechooser.py @@ -3,7 +3,7 @@ Windows file chooser -------------------- ''' -from plyer.facades import FileChooser +from sbapp.plyer.facades import FileChooser from win32com.shell.shell import ( SHBrowseForFolder as browse, SHGetPathFromIDList as get_path @@ -84,7 +84,7 @@ class Win32FileChooser: # e.g. open_file(filters=['*.txt', '*.py']) filters = "" for f in self.filters: - if type(f) == str: + if isinstance(f, str): filters += (f + "\x00") * 2 else: filters += f[0] + "\x00" + ";".join(f[1:]) + "\x00" diff --git a/sbapp/plyer/platforms/win/keystore.py b/sbapp/plyer/platforms/win/keystore.py index 0065a6a..59e48d1 100644 --- a/sbapp/plyer/platforms/win/keystore.py +++ b/sbapp/plyer/platforms/win/keystore.py @@ -3,7 +3,7 @@ try: except Exception: raise NotImplementedError() -from plyer.facades import Keystore +from sbapp.plyer.facades import Keystore class WinKeystore(Keystore): diff --git a/sbapp/plyer/platforms/win/libs/balloontip.py b/sbapp/plyer/platforms/win/libs/balloontip.py index 5494f4e..1334a99 100644 --- a/sbapp/plyer/platforms/win/libs/balloontip.py +++ b/sbapp/plyer/platforms/win/libs/balloontip.py @@ -12,7 +12,7 @@ import ctypes import atexit from threading import RLock -from plyer.platforms.win.libs import win_api_defs +from sbapp.plyer.platforms.win.libs import win_api_defs WS_OVERLAPPED = 0x00000000 diff --git a/sbapp/plyer/platforms/win/libs/batterystatus.py b/sbapp/plyer/platforms/win/libs/batterystatus.py index 1d8202d..0c0c635 100644 --- a/sbapp/plyer/platforms/win/libs/batterystatus.py +++ b/sbapp/plyer/platforms/win/libs/batterystatus.py @@ -6,7 +6,7 @@ __all__ = ('battery_status') import ctypes -from plyer.platforms.win.libs import win_api_defs +from sbapp.plyer.platforms.win.libs import win_api_defs def battery_status(): diff --git a/sbapp/plyer/platforms/win/notification.py b/sbapp/plyer/platforms/win/notification.py index bac381b..b83838e 100644 --- a/sbapp/plyer/platforms/win/notification.py +++ b/sbapp/plyer/platforms/win/notification.py @@ -4,8 +4,8 @@ Module of Windows API for plyer.notification. 
from threading import Thread as thread -from plyer.facades import Notification -from plyer.platforms.win.libs.balloontip import balloon_tip +from sbapp.plyer.facades import Notification +from sbapp.plyer.platforms.win.libs.balloontip import balloon_tip class WindowsNotification(Notification): diff --git a/sbapp/plyer/platforms/win/screenshot.py b/sbapp/plyer/platforms/win/screenshot.py index b04eab5..1291fe9 100644 --- a/sbapp/plyer/platforms/win/screenshot.py +++ b/sbapp/plyer/platforms/win/screenshot.py @@ -18,8 +18,8 @@ from win32con import ( SRCCOPY ) -from plyer.facades import Screenshot -from plyer.platforms.win.storagepath import WinStoragePath +from sbapp.plyer.facades import Screenshot +from sbapp.plyer.platforms.win.storagepath import WinStoragePath class WinScreenshot(Screenshot): diff --git a/sbapp/plyer/platforms/win/storagepath.py b/sbapp/plyer/platforms/win/storagepath.py index fd905d8..1528483 100755 --- a/sbapp/plyer/platforms/win/storagepath.py +++ b/sbapp/plyer/platforms/win/storagepath.py @@ -3,9 +3,9 @@ Windows Storage Path -------------------- ''' -from plyer.facades import StoragePath +from sbapp.plyer.facades import StoragePath from os.path import expanduser -from plyer.platforms.win.libs.win_api_defs import get_PATH +from sbapp.plyer.platforms.win.libs.win_api_defs import get_PATH from uuid import UUID diff --git a/sbapp/plyer/platforms/win/tts.py b/sbapp/plyer/platforms/win/tts.py index e2539c3..2b9f8f9 100644 --- a/sbapp/plyer/platforms/win/tts.py +++ b/sbapp/plyer/platforms/win/tts.py @@ -1,6 +1,6 @@ import subprocess -from plyer.facades import TTS -from plyer.utils import whereis_exe +from sbapp.plyer.facades import TTS +from sbapp.plyer.utils import whereis_exe class EspeakTextToSpeech(TTS): diff --git a/sbapp/plyer/platforms/win/uniqueid.py b/sbapp/plyer/platforms/win/uniqueid.py index 6d42391..42853b7 100644 --- a/sbapp/plyer/platforms/win/uniqueid.py +++ b/sbapp/plyer/platforms/win/uniqueid.py @@ -10,7 +10,7 @@ except ImportError: except ImportError: raise NotImplementedError() -from plyer.facades import UniqueID +from sbapp.plyer.facades import UniqueID class WinUniqueID(UniqueID): diff --git a/sbapp/plyer/platforms/win/wifi.py b/sbapp/plyer/platforms/win/wifi.py index 68efb2f..152d4c0 100644 --- a/sbapp/plyer/platforms/win/wifi.py +++ b/sbapp/plyer/platforms/win/wifi.py @@ -1,5 +1,5 @@ -import plyer.platforms.win.libs.wifi_defs as wifi_lib -from plyer.facades import Wifi +import sbapp.plyer.platforms.win.libs.wifi_defs as wifi_lib +from sbapp.plyer.facades import Wifi class WindowWifi(Wifi): diff --git a/sbapp/plyer/tests/__init__.py b/sbapp/plyer/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/sbapp/plyer/tests/common.py b/sbapp/plyer/tests/common.py deleted file mode 100644 index 7436db8..0000000 --- a/sbapp/plyer/tests/common.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Common objects for testing -========================== - -* :class:`PlatformTest` - used as a decorator, allows running a test function - only on a specific platform (see `plyer.utils.platform`). -* :func:`platform_import` - manual import of a platform specific class instead - of using `plyer.facades.*` proxies. -''' - -import traceback -from os import sep -from os.path import normpath, splitdrive -from plyer.utils import platform as plyer_platform - - -class PlatformTest: - ''' - Class for the @PlatformTest decorator to prevent running tests - calling platform dependent API on different platforms. 
- ''' - - def __init__(self, platform): - self.platform = platform - - def __call__(self, func): - platform = self.platform - - if platform != plyer_platform: - print("Skipping test '{}' - not on '{}'".format( - func.__name__, platform - )) - func = self.eat - return func - - @staticmethod - def eat(*args, **kwargs): - ''' - Simply eat all positional and keyword arguments - and return None as an empty function. - ''' - - -def platform_import(platform, module_name, whereis_exe=None): - ''' - Import platform API directly instead of through Proxy. - ''' - - try: - module = 'plyer.platforms.{}.{}'.format( - platform, module_name - ) - mod = __import__(module, fromlist='.') - - except ImportError as exc: - print(vars(exc)) - traceback.print_exc() - - if whereis_exe: - mod.whereis_exe = whereis_exe - return mod - - -def splitpath(path): - ''' - Split string path into a list of folders (+ file if available). - ''' - if path[0] == sep and path[1] != sep: - path = path[1:] - path = normpath(path).split(sep) - else: - drive, path = splitdrive(path) - if path[0] == sep and path[1] != sep: - path = path[1:] - path = [drive, ] + normpath(path).split(sep) - return path diff --git a/sbapp/plyer/tests/images/kivy32.ico b/sbapp/plyer/tests/images/kivy32.ico deleted file mode 100644 index 2010bf3..0000000 Binary files a/sbapp/plyer/tests/images/kivy32.ico and /dev/null differ diff --git a/sbapp/plyer/tests/test_audio.py b/sbapp/plyer/tests/test_audio.py deleted file mode 100644 index e811891..0000000 --- a/sbapp/plyer/tests/test_audio.py +++ /dev/null @@ -1,110 +0,0 @@ -''' -TestAudio -========= - -Tested platforms: - -* macOS -* Windows - -.. versionadded:: 1.4.0 -''' - -import unittest -import time - -from os import mkdir, remove, environ -from os.path import join, expanduser, exists -from plyer.tests.common import platform_import, PlatformTest - - -class TestAudio(unittest.TestCase): - ''' - TestCase for plyer.audio. - - .. versionadded:: 1.4.0 - ''' - - @PlatformTest('macosx') - def test_audio_macosx(self): - ''' - Test macOS audio start, stop and play - - .. versionadded:: 1.4.0 - ''' - - path = join(expanduser('~'), 'Music') - if not exists(path): - mkdir(path) - - audio = platform_import( - platform='macosx', - module_name='audio', - ) - - self.assertIn('OSXAudio', dir(audio)) - audio = audio.instance() - self.assertIn('OSXAudio', str(audio)) - - self.assertFalse(exists(audio.file_path)) - self.assertIsNone(audio.start()) - time.sleep(0.5) - self.assertIsNone(audio.stop()) - self.assertIsNone(audio.play()) - time.sleep(0.5) - self.assertIsNone(audio.stop()) - - audio.file_path = audio.file_path.replace( - 'file://', '' - ) - - self.assertTrue(exists(audio.file_path)) - - remove(audio.file_path) - - @PlatformTest('win') - def test_audio_win(self): - ''' - Test Windows audio start, stop and play - - .. versionadded:: 1.4.0 - ''' - - if environ.get('APPVEYOR'): - # Appveyor has no recording device installed - # therefore the test will 100% fail - # - # error_code: 328 - # message: - # 'No wave device is installed that can record files in the current - # format. 
To install a wave device, go to Control Panel, click P') - return - - path = join(environ['USERPROFILE'], 'Music') - if not exists(path): - mkdir(path) - - audio = platform_import( - platform='win', - module_name='audio', - ) - - self.assertIn('WinAudio', dir(audio)) - audio = audio.instance() - self.assertIn('WinAudio', str(audio)) - - self.assertFalse(exists(audio.file_path)) - self.assertIsNone(audio.start()) - time.sleep(0.5) - self.assertIsNone(audio.stop()) - self.assertIsNone(audio.play()) - time.sleep(0.5) - self.assertIsNone(audio.stop()) - - self.assertTrue(exists(audio.file_path)) - - remove(audio.file_path) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_battery.py b/sbapp/plyer/tests/test_battery.py deleted file mode 100644 index b44d504..0000000 --- a/sbapp/plyer/tests/test_battery.py +++ /dev/null @@ -1,413 +0,0 @@ -''' -TestBattery -=========== - -Tested platforms: - -* Windows -* Linux - upower, kernel sysclass -* macOS - ioreg -''' - -import unittest -from io import BytesIO -from os.path import join -from textwrap import dedent -from mock import patch, Mock - -from plyer.tests.common import PlatformTest, platform_import - - -class MockedKernelSysclass: - ''' - Mocked object used instead of Linux's sysclass for power_supply - battery uevent. - ''' - - @property - def path(self): - ''' - Mocked path to Linux kernel sysclass. - ''' - return join('/sys', 'class', 'power_supply', 'BAT0') - - @property - def charging(self): - ''' - Mocked battery charging status. - ''' - return u'Discharging' - - @property - def percentage(self): - ''' - Mocked battery charge percentage. - ''' - return 89.0 - - @property - def full(self): - ''' - Mocked full battery charge. - ''' - return 4764000 - - @property - def now(self): - ''' - Calculated current mocked battery charge. - ''' - return self.percentage * self.full / 100.0 - - @property - def uevent(self): - ''' - Mocked /sys/class/power_supply/BAT0 file. - ''' - return BytesIO(dedent(b'''\ - POWER_SUPPLY_NAME=BAT0 - POWER_SUPPLY_STATUS={} - POWER_SUPPLY_PRESENT=1 - POWER_SUPPLY_TECHNOLOGY=Li-ion - POWER_SUPPLY_CYCLE_COUNT=0 - POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 - POWER_SUPPLY_VOLTAGE_NOW=12074000 - POWER_SUPPLY_CURRENT_NOW=1584000 - POWER_SUPPLY_CHARGE_FULL_DESIGN=5800000 - POWER_SUPPLY_CHARGE_FULL={} - POWER_SUPPLY_CHARGE_NOW={} - POWER_SUPPLY_CAPACITY={} - POWER_SUPPLY_CAPACITY_LEVEL=Normal - POWER_SUPPLY_MODEL_NAME=1005HA - POWER_SUPPLY_MANUFACTURER=ASUS - POWER_SUPPLY_SERIAL_NUMBER=0 - '''.decode('utf-8').format( - self.charging, self.full, - self.now, int(self.percentage) - )).encode('utf-8')) - - -class MockedUPower: - ''' - Mocked object used instead of 'upower' binary in the Linux specific API - plyer.platforms.linux.battery. The same output structure is tested for - the range of . - - .. note:: Extend the object with another data sample if it does not match. 
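The removed `test_audio.py` above exercised the facade through `start()`, `stop()`, `play()` and `file_path`, which the reworked Linux, macOS and Windows backends in this diff still provide, alongside the new `playing()` and `reload()` helpers. A hedged usage sketch against the vendored facade; the import path and output path are assumptions based on the renamed `sbapp.plyer` package:

```python
import time
from sbapp.plyer import audio  # assumption: the vendored proxy keeps plyer's layout

audio.file_path = "/tmp/sideband-recording.ogg"  # placeholder output path
audio.start()                                    # begin recording
time.sleep(2)
audio.stop()                                     # finalize the file
audio.play()                                     # play it back
while audio.playing():                           # helper added in this diff
    time.sleep(0.25)
```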
- ''' - - min_version = '0.99.4' - max_version = '0.99.4' - - values = { - u'Device': u'/org/freedesktop/UPower/devices/battery_BAT0', - u'native-path': u'BAT0', - u'vendor': u'ASUS', - u'model': u'1005HA', - u'power supply': u'yes', - u'updated': u'Thu 05 Jul 2018 23:15:01 PM CEST', - u'has history': u'yes', - u'has statistics': u'yes', - u'battery': { - u'present': u'yes', - u'rechargeable': u'yes', - u'state': u'discharging', - u'warning-level': u'none', - u'energy': u'48,708 Wh', - u'energy-empty': u'0 Wh', - u'energy-full': u'54,216 Wh', - u'energy-full-design': u'62,64 Wh', - u'energy-rate': u'7,722 W', - u'voltage': u'11,916 V', - u'time to empty': u'6,3 hours', - u'percentage': u'89%', - u'capacity': u'86,5517%', - u'technology': u'lithium-ion', - u'icon-name': u"'battery-full-symbolic" - }, - u'History (charge)': u'1530959637 89,000 discharging', - u'History (rate)': u'1530958556 7,474 discharging' - } - - data = str( - ' native-path: {native-path}\n' - ' vendor: {vendor}\n' - ' model: {model}\n' - ' power supply: {power supply}\n' - ' updated: {updated}\n' - ' has history: {has history}\n' - ' has statistics: {has statistics}\n' - ' battery\n' - ' present: {battery[present]}\n' - ' rechargeable: {battery[rechargeable]}\n' - ' state: {battery[state]}\n' - ' warning-level: {battery[warning-level]}\n' - ' energy: {battery[energy]}\n' - ' energy-empty: {battery[energy-empty]}\n' - ' energy-full: {battery[energy-full]}\n' - ' energy-full-design: {battery[energy-full-design]}\n' - ' energy-rate: {battery[energy-rate]}\n' - ' voltage: {battery[voltage]}\n' - ' time to empty: {battery[time to empty]}\n' - ' percentage: {battery[percentage]}\n' - ' capacity: {battery[capacity]}\n' - ' technology: {battery[technology]}\n' - ' icon-name: {battery[icon-name]}\n' - ' History (charge):\n' - ' {History (charge)}\n' - ' History (rate):\n' - ' {History (rate)}\n' - ).format(**values).encode('utf-8') - # LinuxBattery calls decode() - - def __init__(self, *args, **kwargs): - # only to ignore all args, kwargs - pass - - @staticmethod - def communicate(): - ''' - Mock Popen.communicate, so that 'upower' isn't used. - ''' - return (MockedUPower.data, ) - - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - Linux UPower binary is present on the system. - ''' - return binary == 'upower' - - @staticmethod - def charging(): - ''' - Return charging bool from mocked data. - ''' - return MockedUPower.values['battery']['state'] == 'charging' - - @staticmethod - def percentage(): - ''' - Return percentage from mocked data. - ''' - percentage = MockedUPower.values['battery']['percentage'][:-1] - return float(percentage.replace(',', '.')) - - -class MockedIOReg: - ''' - Mocked object used instead of Apple's ioreg. 
- ''' - values = { - "MaxCapacity": "5023", - "CurrentCapacity": "4222", - "IsCharging": "No" - } - - output = dedent( - """+-o AppleSmartBattery - {{ - "TimeRemaining" = 585 - "AvgTimeToEmpty" = 585 - "InstantTimeToEmpty" = 761 - "ExternalChargeCapable" = Yes - "FullPathUpdated" = 1541845134 - "CellVoltage" = (4109,4118,4099,0) - "PermanentFailureStatus" = 0 - "BatteryInvalidWakeSeconds" = 30 - "AdapterInfo" = 0 - "MaxCapacity" = {MaxCapacity} - "Voltage" = 12326 - "DesignCycleCount70" = 13 - "Manufacturer" = "SWD" - "Location" = 0 - "CurrentCapacity" = {CurrentCapacity} - "LegacyBatteryInfo" = {{"Amperage"=18446744073709551183,"Flags"=4,\ - "Capacity"=5023,"Current"=4222,"Voltage"=12326,"Cycle Count"=40}} - "FirmwareSerialNumber" = 1 - "BatteryInstalled" = Yes - "PackReserve" = 117 - "CycleCount" = 40 - "DesignCapacity" = 5088 - "OperationStatus" = 58435 - "ManufactureDate" = 19700 - "AvgTimeToFull" = 65535 - "BatterySerialNumber" = "1234567890ABCDEFGH" - "BootPathUpdated" = 1541839734 - "PostDischargeWaitSeconds" = 120 - "Temperature" = 3038 - "UserVisiblePathUpdated" = 1541845194 - "InstantAmperage" = 18446744073709551249 - "ManufacturerData" = <000000000> - "FullyCharged" = No - "MaxErr" = 1 - "DeviceName" = "bq20z451" - "IOGeneralInterest" = "IOCommand is not serializable" - "Amperage" = 18446744073709551183 - "IsCharging" = {IsCharging} - "DesignCycleCount9C" = 1000 - "PostChargeWaitSeconds" = 120 - "ExternalConnected" = No - }}""" - ).format(**values).encode('utf-8') - - def __init__(self, *args, **kwargs): - # only to ignore all args, kwargs - pass - - @staticmethod - def communicate(): - ''' - Mock Popen.communicate, so that 'ioreg' isn't used. - ''' - return (MockedIOReg.output, ) - - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - macOS ioreg binary is present on the system. - ''' - return binary == 'ioreg' - - @staticmethod - def charging(): - ''' - Return charging bool from mocked data. - ''' - return MockedIOReg.values['IsCharging'] == 'Yes' - - @staticmethod - def percentage(): - ''' - Return percentage from mocked data. - ''' - current_capacity = int(MockedIOReg.values['CurrentCapacity']) - max_capacity = int(MockedIOReg.values['MaxCapacity']) - percentage = 100.0 * current_capacity / max_capacity - - return percentage - - -class TestBattery(unittest.TestCase): - ''' - TestCase for plyer.battery. - ''' - - def test_battery_linux_upower(self): - ''' - Test mocked Linux UPower for plyer.battery. - ''' - battery = platform_import( - platform='linux', - module_name='battery', - whereis_exe=MockedUPower.whereis_exe - ) - battery.Popen = MockedUPower - battery = battery.instance() - - self.assertEqual( - battery.status, { - 'isCharging': MockedUPower.charging(), - 'percentage': MockedUPower.percentage() - } - ) - - def test_battery_linux_kernel(self): - ''' - Test mocked Linux kernel sysclass for plyer.battery. 
- ''' - - def false(*args, **kwargs): - return False - - sysclass = MockedKernelSysclass() - - with patch(target='os.path.exists') as bat_path: - # first call to trigger exists() call - platform_import( - platform='linux', - module_name='battery', - whereis_exe=false - ).instance() - bat_path.assert_called_once_with(sysclass.path) - - # exists() checked with sysclass path - # set mock to proceed with this branch - bat_path.return_value = True - - battery = platform_import( - platform='linux', - module_name='battery', - whereis_exe=false - ).instance() - - stub = Mock(return_value=sysclass.uevent) - target = 'builtins.open' - - with patch(target=target, new=stub): - self.assertEqual( - battery.status, { - 'isCharging': sysclass.charging == 'Charging', - 'percentage': sysclass.percentage - } - ) - - @PlatformTest('win') - def test_battery_win(self): - ''' - Test Windows API for plyer.battery. - ''' - battery = platform_import( - platform='win', - module_name='battery' - ).instance() - for key in ('isCharging', 'percentage'): - self.assertIn(key, battery.status) - self.assertIsNotNone(battery.status[key]) - - def test_battery_macosx(self): - ''' - Test macOS IOReg for plyer.battery. - ''' - battery = platform_import( - platform='macosx', - module_name='battery', - whereis_exe=MockedIOReg.whereis_exe - ) - - battery.Popen = MockedIOReg - self.assertIn('OSXBattery', dir(battery)) - battery = battery.instance() - self.assertIn('OSXBattery', str(battery)) - - self.assertEqual( - battery.status, { - 'isCharging': MockedIOReg.charging(), - 'percentage': MockedIOReg.percentage() - } - ) - - def test_battery_macosx_instance(self): - ''' - Test macOS instance for plyer.battery - ''' - - def no_exe(*args, **kwargs): - return - - battery = platform_import( - platform='macosx', - module_name='battery', - whereis_exe=no_exe - ) - - battery = battery.instance() - self.assertNotIn('OSXBattery', str(battery)) - self.assertIn('Battery', str(battery)) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_bluetooth.py b/sbapp/plyer/tests/test_bluetooth.py deleted file mode 100644 index 8997c4d..0000000 --- a/sbapp/plyer/tests/test_bluetooth.py +++ /dev/null @@ -1,144 +0,0 @@ -''' -TestBluetooth -============= - -Tested platforms: - -* macOS - system_profiler -''' - -import unittest - -from plyer.tests.common import platform_import -from textwrap import dedent - - -class MockedSystemProfiler: - ''' - Mocked object used instead of Apple's system_profiler - ''' - value = "On" - output = dedent( - """Bluetooth: - - Apple Bluetooth Software Version: 6.0.7f11 - Hardware, Features, and Settings: - Address: AA-00-BB-11-CC-22 - Bluetooth Low Energy Supported: Yes - Handoff Supported: Yes - Instant Hot Spot Supported: Yes - Manufacturer: Broadcom - Transport: UART - Chipset: 1234 - Firmware Version: v00 c0000 - Bluetooth Power: {} - Auto Seek Pointing: On - Remote wake: On - Vendor ID: 0x0000 - Product ID: 0x0000 - HCI Version: (0x0) - HCI Revision: 0x0000 - LMP Version: (0x0) - LMP Subversion: 0x0000 - Auto Seek Keyboard: On - Devices (Paired, Configured, etc.): - iPhone: - Address: AA-00-BB-11-CC-22 - Major Type: Miscellaneous - Minor Type: Unknown - Services: - Paired: No - Configured: Yes - Connected: No - Class of Device: 0x00 0x00 0x0000 - Services: - Bluetooth File Transfer: - Folder other devices can browse: ~/Public - When receiving items: Accept all without warning - State: Disabled - Bluetooth File Exchange: - Folder for accepted items: ~/Downloads - When other items are 
accepted: Save to location - When receiving items: Accept all without warning - State: Disabled - Bluetooth Internet Sharing: - State: Disabled - Incoming Serial Ports: - Bluetooth-Incoming-Port: - RFCOMM Channel: 3 - Requires Authentication: No""" - ).format(value).encode('utf-8') - - def __init__(self, *args, **kwargs): - # only to ignore all args, kwargs - pass - - @staticmethod - def communicate(): - ''' - Mock Popen.communicate, so that 'system_profiler' - isn't used. - ''' - return (MockedSystemProfiler.output, ) - - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - macOS system_profiler binary is present on the system. - ''' - return binary == 'system_profiler' - - @staticmethod - def get_info(): - ''' - Return current bluetooth status from mocked output. - ''' - return MockedSystemProfiler.value - - -class TestBluetooth(unittest.TestCase): - ''' - TestCase for plyer.bluetooth. - ''' - - def test_bluetooth_macosx(self): - ''' - Test macOS system_profiler for plyer.bluetooth. - ''' - bluetooth = platform_import( - platform='macosx', - module_name='bluetooth', - whereis_exe=MockedSystemProfiler.whereis_exe - ) - - bluetooth.Popen = MockedSystemProfiler - self.assertIn('OSXBluetooth', dir(bluetooth)) - bluetooth = bluetooth.instance() - self.assertIn('OSXBluetooth', str(bluetooth)) - - self.assertEqual( - bluetooth.info, MockedSystemProfiler.get_info() - ) - - def test_bluetooth_macosx_instance(self): - ''' - Test macOS instance for plyer.bluetooth. - ''' - - def no_exe(*args, **kwargs): - return - - bluetooth = platform_import( - platform='macosx', - module_name='bluetooth', - whereis_exe=no_exe - ) - - bluetooth = bluetooth.instance() - self.assertNotIn('OSXBluetooth', str(bluetooth)) - self.assertIn('Bluetooth', str(bluetooth)) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_cpu.py b/sbapp/plyer/tests/test_cpu.py deleted file mode 100644 index 8882522..0000000 --- a/sbapp/plyer/tests/test_cpu.py +++ /dev/null @@ -1,262 +0,0 @@ -''' -TestCPU -======= - -Tested platforms: - -* Windows -* Linux - nproc -''' - -import unittest -from os import environ -from os.path import join -from mock import patch, Mock -from textwrap import dedent - -from plyer.tests.common import PlatformTest, platform_import, splitpath - - -class MockedKernelCPU: - def __init__(self, *args, **kwargs): - self.fname = args[0] if args else '' - self.cpu_path = join('/sys', 'devices', 'system', 'cpu') - self.cores = 16 - self.indicies = 4 - - def __enter__(self, *args): - file_value = None - cpu_path = self.cpu_path - spath = splitpath(self.fname) - - if self.fname == join(cpu_path, 'present'): - file_value = Mock() - file_value.read.return_value = self.present - elif spath[5] == 'cache' and spath[7] == 'level': - file_value = Mock() - # force bytes, because reading files as bytes - file_value.read.return_value = str( - self.index_types[spath[4]][spath[6]][spath[7]] - ).encode('utf-8') - return file_value - - def __exit__(self, *args): - pass - - @property - def present(self): - rng = list(range(self.cores)) - start = str(rng[0]) - end = str(rng[-1]) - if start == end: # cores == 1 --> b'0' - value = str(start) - else: # cores > 1 --> b'0-n' - value = str('-'.join([start, end])) - return value.encode('utf-8') - - @property - def listdir(self): - return ['index{}'.format(i) for i in range(self.indicies)] - - @property - def index_types(self): - # assign L1 to index0-1, L2 to 2, L3 to 3 - types = {0: 1, 1: 1, 2: 2, 3: 3} - - return { - 
'cpu{}'.format(c): { - 'index{}'.format(i): { - 'level': types[i] - } - for i in range(self.indicies) - } - for c in range(self.cores) - } - - -class MockedNProc: - ''' - Mocked object used instead of 'nproc' binary in the Linux specific API - plyer.platforms.linux.cpu. The same output structure is tested for - the range of . - - .. note:: Extend the object with another data sample if it does not match. - ''' - - min_version = '8.21' - max_version = '8.21' - logical_cores = 99 - - def __init__(self, *args, **kwargs): - # only to ignore all args, kwargs - pass - - @staticmethod - def communicate(): - ''' - Mock Popen.communicate, so that 'nproc' isn't used. - ''' - return (str(MockedNProc.logical_cores).encode('utf-8'), ) - - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - Linux NProc binary is present on the system. - ''' - return binary == 'nproc' - - @staticmethod - def logical(): - ''' - Return percentage from mocked data. - ''' - return int(MockedNProc.logical_cores) - - -class MockedProcinfo: - # docs: - # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git - # /tree/arch/x86/kernel/cpu/proc.c - sockets = 1 # physical id - physical = 2 # core id - threads_per_core = 2 # Intel specs document for i7-4500U - logical = physical * threads_per_core # processor - - def __init__(self, *args, **kwargs): - self.fname = args[0] if args else '' - - self.output = [] - __step = 0 # 0,1,0,1 -> 0,0,1,1 - for soc in range(self.sockets): - for log in range(self.logical): - if log != 0 and not log % self.physical: - __step += 1 - self.output.append((dedent( - '''\ - processor\t: {logical} - vendor_id\t: GenuineIntel - cpu family\t: 6 - model\t\t: 69 - model name\t: Intel(R) Core(TM) i7-4500U CPU @ 1.80GHz - stepping\t: 1 - microcode\t: 0x17 - cpu MHz\t\t: 774.000 - cache size\t: 4096 KB - physical id\t: {socket} - siblings\t: 4 - core id\t\t: {physical} - cpu cores\t: {threads_per_core} - apicid\t\t: {logical} - initial apicid\t: 0 - fpu\t\t: yes - fpu_exception\t: yes - cpuid level\t: 13 - wp\t\t: yes - flags\t\t: fpu vme de pse tsc msr pae mce cx8 ... - bogomips\t: 3591.40 - clflush size\t: 64 - cache_alignment\t: 64 - address sizes\t: 39 bits physical, 48 bits virtual - power management: - \n''' - )).format(**{ - 'socket': soc, - 'physical': __step, - 'logical': log, - 'threads_per_core': self.threads_per_core - })) - self.output = ''.join(self.output).encode('utf-8') - - def __enter__(self, *args): - file_value = None - - if self.fname == '/proc/cpuinfo': - file_value = Mock() - file_value.readlines.return_value = self.output.split( - '\n'.encode('utf-8') - ) - return file_value - - def __exit__(self, *args): - pass - - -class TestCPU(unittest.TestCase): - ''' - TestCase for plyer.cpu. - ''' - - def test_cpu_linux_physical(self): - cpu = platform_import( - platform='linux', - module_name='cpu', - whereis_exe=lambda b: b == 'nproc' - ).instance() - - stub = MockedProcinfo - target = 'builtins.open' - - with patch(target=target, new=stub): - sb = stub() - self.assertEqual( - cpu.physical, sb.physical - ) - - def test_cpu_linux_logical(self): - ''' - Test mocked Linux NProc for plyer.cpu. 
- ''' - cpu = platform_import( - platform='linux', - module_name='cpu', - whereis_exe=MockedNProc.whereis_exe - ) - cpu.Popen = MockedNProc - cpu = cpu.instance() - - self.assertEqual( - cpu.logical, MockedNProc.logical() - ) - - @PlatformTest('linux') - def test_cpu_linux_cache(self): - cpu = platform_import( - platform='linux', - module_name='cpu', - whereis_exe=lambda b: b == 'nproc' - ).instance() - - stub = MockedKernelCPU - target = 'builtins.open' - sub_target = 'plyer.platforms.linux.cpu.listdir' - - with patch(target=target, new=stub): - with patch(target=sub_target, return_value=stub().listdir): - sb = stub() - self.assertEqual( - cpu.cache, { - 'L1': sb.cores * 2, - 'L2': sb.cores, - 'L3': sb.cores - } - ) - - @PlatformTest('win') - def test_cpu_win_logical(self): - cpu = platform_import( - platform='win', - module_name='cpu' - ) - - cpu = cpu.instance() - self.assertEqual( - cpu.logical, - # https://docs.microsoft.com/en-us/previous-versions/ - # windows/it-pro/windows-xp/bb490954(v=technet.10) - int(environ['NUMBER_OF_PROCESSORS']) - ) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_devicename.py b/sbapp/plyer/tests/test_devicename.py deleted file mode 100644 index cbb0fc4..0000000 --- a/sbapp/plyer/tests/test_devicename.py +++ /dev/null @@ -1,77 +0,0 @@ -''' -TestDeviceName -============ - -Tested platforms: - -* Windows -''' - -import unittest -from mock import patch -from plyer.tests.common import PlatformTest, platform_import -import socket - - -class TestDeviceName(unittest.TestCase): - ''' - TestCase for plyer.devicename. - ''' - - @PlatformTest('win') - def test_devicename_win(self): - ''' - Test Windows API for plyer.devicename. - ''' - devicename = platform_import(platform='win', - module_name='devicename' - ) - devicename_instance = devicename.instance() - - with patch.object(socket, - 'gethostname', - return_value='mocked_windows_hostname' - ) as _: - - evaluated_device_name = devicename_instance.device_name - self.assertEqual(evaluated_device_name, 'mocked_windows_hostname') - - @PlatformTest('linux') - def test_devicename_linux(self): - ''' - Test Linux API for plyer.devicename. - ''' - devicename = platform_import(platform='linux', - module_name='devicename' - ) - devicename_instance = devicename.instance() - - with patch.object(socket, - 'gethostname', - return_value='mocked_linux_hostname' - ) as _: - - evaluated_device_name = devicename_instance.device_name - self.assertEqual(evaluated_device_name, 'mocked_linux_hostname') - - @PlatformTest('macosx') - def test_devicename_macosx(self): - ''' - Test MacOSX API for plyer.devicename. - ''' - devicename = platform_import(platform='macosx', - module_name='devicename' - ) - devicename_instance = devicename.instance() - - with patch.object(socket, - 'gethostname', - return_value='mocked_macosx_hostname' - ) as _: - - evaluated_device_name = devicename_instance.device_name - self.assertEqual(evaluated_device_name, 'mocked_macosx_hostname') - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_email.py b/sbapp/plyer/tests/test_email.py deleted file mode 100644 index 60aa86f..0000000 --- a/sbapp/plyer/tests/test_email.py +++ /dev/null @@ -1,48 +0,0 @@ -''' -TestEmail -========= - -Tested platforms: - -* Windows -''' - -import unittest - -from mock import Mock, patch -from plyer.tests.common import PlatformTest, platform_import - - -class TestEmail(unittest.TestCase): - ''' - TestCase for plyer.email. 
- ''' - - @staticmethod - @PlatformTest('win') - def test_email_win(): - ''' - Test starting Windows email client for plyer.email. - ''' - email = platform_import( - platform='win', - module_name='email' - ) - - try: - test_mailto = 'mailto:recipient?subject=subject&body=text' - with patch(target='os.startfile', new=Mock()) as startfile: - email.instance().send( - recipient='recipient', - subject='subject', - text='text' - ) - startfile.assert_called_once_with(test_mailto) - except WindowsError: - # if WE is raised, email client isn't found, - # but the platform code works correctly - print('Mail client not found!') - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_facade.py b/sbapp/plyer/tests/test_facade.py deleted file mode 100644 index b49b15f..0000000 --- a/sbapp/plyer/tests/test_facade.py +++ /dev/null @@ -1,183 +0,0 @@ -''' -TestFacade -========== - -Tested platforms: - -* Android -* iOS -* Windows -* MacOS -* Linux -''' - -import unittest - -import sys -from types import MethodType - -from mock import Mock, patch - -import plyer - - -def mock_platform_module(mod, platform, cls): - ''' - Create a stub module for a specific platform. This module contains: - - * class inheriting from facade implementing the desired feature - * 'instance' function returning an instance of the implementing class - ''' - - # assemble an instance returned from the instance() function - # which is created from a dynamically created class - # .'> e.g.: - # - stub_inst = Mock( - __module__=mod, - __class__=type( - '{}{}'.format(platform.title(), cls), (object, ), { - '__module__': mod - } - ), - ) - - # manual 'return_value' assign to Mock, so that the instance() call - # can return stub_inst's own instance instead of creating another - # unnecessary Mock object - stub_inst.return_value = stub_inst - - # bind custom function returning the class name to stub_inst instance, - # so that instance().show() call requires 'self' i.e. instance parameter - # for the function to access the instance's class name - stub_inst.show = MethodType(lambda slf: slf, stub_inst) - - stub_mod = Mock(instance=stub_inst) - return stub_mod - - -# dummy pyjnius class to silence the import + config -class DummyJnius: - ''' - Mocked PyJNIus module. - ''' - - def __init__(self, *args, **kwargs): - class JavaClass: - ''' - Mocked PyJNIus JavaClass object. - ''' - - def __init__(self): - self.ANDROID_VERSION = None - self.SDK_INT = 1 - self.mActivity = None - - self.autoclass = lambda *a, **kw: JavaClass() - - -class TestFacade(unittest.TestCase): - ''' - TestCase for plyer.utils.Proxy and plyer.facades. - ''' - - def test_facade_existing_platforms(self): - ''' - Test for returning an object for Android API implementation - from Proxy object using a dynamically generated dummy objects. 
- ''' - _original = plyer.utils.platform - - for plat in {'android', 'ios', 'win', 'macosx', 'linux'}: - plyer.utils.platform = plat - - if plat == 'android': - # android platform automatically imports jnius - sys.modules['jnius'] = DummyJnius() - - # create stub module with instance func and class - stub_mod = mock_platform_module( - mod='plyer.platforms.{}.dummy'.format(plat), - platform=plyer.utils.platform, - cls='Dummy' - ) - - proxy_cls = plyer.utils.Proxy - target = 'builtins.__import__' - - with patch(target=target, return_value=stub_mod): - dummy = proxy_cls('dummy', stub_mod) - - self.assertEqual( - str(dummy.__class__).split("'")[1], - 'plyer.platforms.{}.dummy.{}Dummy'.format( - plat, plat.title() - ) - ) - self.assertEqual( - str(dummy.show().__class__).split("'")[1], - 'plyer.platforms.{}.dummy.{}Dummy'.format( - plat, plat.title() - ) - ) - - plyer.utils.platform = _original - - def test_facade_unknown(self): - ''' - Test fallback of Proxy to facade if there - is no such requested platform. - ''' - - _original = plyer.utils.platform - plyer.utils.platform = 'unknown' - - # no 'unknown' platform (folder), fallback to facade - class MockedProxy(plyer.utils.Proxy): - ''' - Partially mocked Proxy class, so that we pull the error - from traceback.print_exc to the test and check the calls. - ''' - - # _ensure_obj is called only once, to either - # get the platform object or fall back to facade - # therefore the three self.asserts below will return - # different values - expected_asserts = [True, False, False] - - def _ensure_obj(inst): - # called once, prints to stderr - - # mock stderr because traceback.print_exc uses it - # https://github.com/python/cpython/blob/ - # 16dfca4d829e45f36e71bf43f83226659ce49315/Lib/traceback.py#L99 - sys.stderr = Mock() - - # call the original function to trigger - # ImportError warnings in stderr - super(MockedProxy, inst)._ensure_obj() - - # Traceback (most recent call last): - # File "/app/plyer/utils.py", line 88, in _ensure_obj - # mod = __import__(module, fromlist='.') - # ImportError: No module named unknown.dummy - - # must not use self.assertX - # (has to be checked on the go!) - expected_bool = MockedProxy.expected_asserts.pop(0) - call_count = sys.stderr.write.call_count - assert (call_count == 6) == expected_bool, call_count - - # return stderr to the original state - sys.stderr = sys.__stderr__ - - proxy_cls = MockedProxy - facade = Mock() - dummy = proxy_cls('dummy', facade) - - self.assertEqual(dummy._mock_new_parent, facade) - plyer.utils.platform = _original - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_notification.py b/sbapp/plyer/tests/test_notification.py deleted file mode 100644 index 7177ad7..0000000 --- a/sbapp/plyer/tests/test_notification.py +++ /dev/null @@ -1,209 +0,0 @@ -''' -TestNotification -================ - -Tested platforms: - -* Windows -* Linux - notify-send, dbus -''' - -import unittest -import sys - -from time import sleep -from os.path import dirname, abspath, join - -from mock import Mock, patch -from plyer.tests.common import PlatformTest, platform_import - - -class MockedNotifySend: - ''' - Mocked object used instead of the console-like calling - of notify-send binary with parameters. - ''' - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - Linux notify-send binary is present on the system. - ''' - return binary == 'notify-send' - - @staticmethod - def call(args): - ''' - Mocked subprocess.call to check console parameters. 
- ''' - assert len(args) >= 3 - assert TestNotification.data['title'] in args - assert TestNotification.data['message'] in args - - @staticmethod - def warn(msg): - ''' - Mocked warnings.warn, so that we check the custom ImportError message. - ''' - assert 'dbus package is not installed' in msg - - -class TestNotification(unittest.TestCase): - ''' - TestCase for plyer.notification. - ''' - - data = { - 'title': 'title', - 'message': 'My Message\nis multiline', - 'app_name': 'Plyer Test', - 'app_icon': join( - dirname(abspath(__file__)), - 'images', 'kivy32.ico' - ), - 'timeout': 0.7 - } - - def show_notification(self, instance): - ''' - Call notify() from platform specific instance with sample data. - ''' - instance.notify(**self.data) - - @PlatformTest('win') - def test_notification_windows(self): - ''' - Test Windows API for plyer.notification. - ''' - import ctypes - from ctypes import ( - WINFUNCTYPE, POINTER, - create_unicode_buffer, - c_bool, c_int - ) - notif = platform_import( - platform='win', - module_name='notification' - ).instance() - enum_windows = ctypes.windll.user32.EnumWindows - get_class_name = ctypes.windll.user32.GetClassNameW - - # loop over windows and get refs to - # the opened plyer notifications - clsnames = [] - - def fetch_class(hwnd, *args): - ''' - EnumWindowsProc callback for EnumWindows. - ''' - buff = create_unicode_buffer(50) - get_class_name(hwnd, buff, 50) - - if 'Plyer' in buff.value: - clsnames.append(buff.value) - - # ensure it's not an empty facade - self.assertIn('WindowsNotification', str(notif)) - - # create enum function for EnumWindows - enum_windows_proc = WINFUNCTYPE( - # returns - c_bool, - - # input params: hwnd, lParam - POINTER(c_int), POINTER(c_int) - ) - - for i in range(3): - self.show_notification(notif) - - # the balloon needs some time to became visible in WinAPI - sleep(0.2) - - # fetch window class names - enum_windows( - # enum & params - enum_windows_proc(fetch_class), None - ) - - # 3 active balloons at the same time, - # class_name is incremented - see WindowsBalloonTip - self.assertEqual(len(clsnames), i + 1) - self.assertIn('PlyerTaskbar' + str(i), clsnames) - clsnames = [] - - @PlatformTest('linux') - def test_notification_dbus(self): - ''' - Test mocked Linux DBus for plyer.notification. 
- ''' - notif = platform_import( - platform='linux', - module_name='notification' - ) - self.assertIn('NotifyDbus', dir(notif)) - - # (3) mocked Interface called from dbus - interface = Mock() - interface.side_effect = (interface, ) - - # (2) mocked SessionBus called from dbus - session_bus = Mock() - session_bus.side_effect = (session_bus, ) - - # (1) mocked dbus for import - dbus = Mock(SessionBus=session_bus, Interface=interface) - - # inject the mocked module - self.assertNotIn('dbus', sys.modules) - sys.modules['dbus'] = dbus - - try: - notif = notif.instance() - self.assertIn('NotifyDbus', str(notif)) - - # call notify() - self.show_notification(notif) - - # check whether Mocks were called - dbus.SessionBus.assert_called_once() - - session_bus.get_object.assert_called_once_with( - 'org.freedesktop.Notifications', - '/org/freedesktop/Notifications' - ) - - interface.Notify.assert_called_once_with( - TestNotification.data['app_name'], - 0, - TestNotification.data['app_icon'], - TestNotification.data['title'], - TestNotification.data['message'], - [], {}, - TestNotification.data['timeout'] * 1000 - ) - finally: - del sys.modules['dbus'] - self.assertNotIn('dbus', sys.modules) - - @PlatformTest('linux') - def test_notification_notifysend(self): - ''' - Test mocked Linux notify-send for plyer.notification. - ''' - notif = platform_import( - platform='linux', - module_name='notification', - whereis_exe=MockedNotifySend.whereis_exe - ) - self.assertIn('NotifySendNotification', dir(notif)) - with patch(target='warnings.warn', new=MockedNotifySend.warn): - notif = notif.instance() - self.assertIn('NotifySendNotification', str(notif)) - - with patch(target='subprocess.call', new=MockedNotifySend.call): - self.assertIsNone(self.show_notification(notif)) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_screenshot.py b/sbapp/plyer/tests/test_screenshot.py deleted file mode 100644 index 931fac2..0000000 --- a/sbapp/plyer/tests/test_screenshot.py +++ /dev/null @@ -1,137 +0,0 @@ -''' -TestScreenshot -============== - -Tested platforms: - -* MacOS -* Linux -''' - -import unittest - -from os import mkdir, remove -from os.path import join, expanduser, exists - -from mock import patch -from plyer.tests.common import PlatformTest, platform_import - - -class MockedScreenCapture: - ''' - Mocked object used instead of the console-like calling - of screencapture binary with parameters. - ''' - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - MacOS screencapture binary is present on the system. - ''' - return binary == 'screencapture' - - @staticmethod - def call(args): - ''' - Mocked subprocess.call to check console parameters. - ''' - assert len(args) == 2, len(args) - assert args[0] == 'screencapture', args - assert args[1] == join( - expanduser('~'), 'Pictures', 'screenshot.png' - ), args - with open(args[1], 'w') as scr: - scr.write('') - - -class MockedXWD: - ''' - Mocked object used instead of the console-like calling - of X11 xwd binary with parameters. - ''' - @staticmethod - def whereis_exe(binary): - ''' - Mock whereis_exe, so that it looks like - X11 xwd binary is present on the system. - ''' - return binary == 'xwd' - - @staticmethod - def call(args, stdout): - ''' - Mocked subprocess.call to check console parameters. 
- ''' - assert len(args) == 3, args - assert args[0] == 'xwd', args - assert args[1:] == ['-silent', '-root'], args - assert stdout.name == join( - expanduser('~'), 'Pictures', 'screenshot.xwd' - ), stdout.name - with open(stdout.name, 'w') as scr: - scr.write('') - - -class TestScreenshot(unittest.TestCase): - ''' - TestCase for plyer.screenshot. - ''' - - def setUp(self): - path = join(expanduser('~'), 'Pictures') - if not exists(path): - mkdir(path) - - @PlatformTest('macosx') - def test_screenshot_screencapture(self): - ''' - Test mocked MacOS screencapture for plyer.screenshot. - ''' - scr = platform_import( - platform='macosx', - module_name='screenshot', - whereis_exe=MockedScreenCapture.whereis_exe - ) - - # such class exists in screenshot module - self.assertIn('OSXScreenshot', dir(scr)) - - # the required instance is created - scr = scr.instance() - self.assertIn('OSXScreenshot', str(scr)) - - # move capture from context manager to run without mock - with patch(target='subprocess.call', new=MockedScreenCapture.call): - self.assertIsNone(scr.capture()) - - self.assertTrue(exists(scr.file_path)) - remove(scr.file_path) - - @PlatformTest('linux') - def test_screenshot_xwd(self): - ''' - Test mocked X11 xwd for plyer.screenshot. - ''' - scr = platform_import( - platform='linux', - module_name='screenshot', - whereis_exe=MockedXWD.whereis_exe - ) - - # such class exists in screenshot module - self.assertIn('LinuxScreenshot', dir(scr)) - - # the required instance is created - scr = scr.instance() - self.assertIn('LinuxScreenshot', str(scr)) - - # move capture from context manager to run without mock - with patch(target='subprocess.call', new=MockedXWD.call): - self.assertIsNone(scr.capture()) - - self.assertTrue(exists(scr.file_path)) - remove(scr.file_path) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_storagepath.py b/sbapp/plyer/tests/test_storagepath.py deleted file mode 100644 index 35b8139..0000000 --- a/sbapp/plyer/tests/test_storagepath.py +++ /dev/null @@ -1,72 +0,0 @@ -''' -TestStoragePath -=============== - -Tested platforms: - -* macOS -''' - -import unittest - -from plyer.tests.common import platform_import, PlatformTest - - -class TestStoragePath(unittest.TestCase): - ''' - TestCase for plyer.storagepath. - ''' - - @PlatformTest('macosx') - def test_storagepath_macosx(self): - ''' - Test macOS for plyer.storagepath. - ''' - storagepath = platform_import( - platform='macosx', - module_name='storagepath' - ) - - self.assertIn('OSXStoragePath', dir(storagepath)) - storagepath = storagepath.instance() - self.assertIn('OSXStoragePath', str(storagepath)) - - path_format = 'file:///Users/' - - self.assertIn(path_format, storagepath.get_home_dir()) - self.assertIn('/', storagepath.get_root_dir()) - self.assertIn(path_format, storagepath.get_documents_dir()) - self.assertIn(path_format, storagepath.get_downloads_dir()) - self.assertIn(path_format, storagepath.get_videos_dir()) - self.assertIn(path_format, storagepath.get_music_dir()) - self.assertIn(path_format, storagepath.get_pictures_dir()) - self.assertIn(path_format, storagepath.get_application_dir()) - - @PlatformTest('win') - def test_storagepath_windows(self): - ''' - Test win for plyer.storagepath. 
- ''' - storagepath = platform_import( - platform='win', - module_name='storagepath' - ) - - self.assertIn('WinStoragePath', dir(storagepath)) - storagepath = storagepath.instance() - self.assertIn('WinStoragePath', str(storagepath)) - - path_format = ':\\' - - self.assertIn(path_format, storagepath.get_home_dir()) - self.assertIn(path_format, storagepath.get_root_dir()) - self.assertIn(path_format, storagepath.get_documents_dir()) - self.assertIn(path_format, storagepath.get_downloads_dir()) - self.assertIn(path_format, storagepath.get_videos_dir()) - self.assertIn(path_format, storagepath.get_music_dir()) - self.assertIn(path_format, storagepath.get_pictures_dir()) - self.assertIn(path_format, storagepath.get_application_dir()) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_uniqueid.py b/sbapp/plyer/tests/test_uniqueid.py deleted file mode 100644 index 629c840..0000000 --- a/sbapp/plyer/tests/test_uniqueid.py +++ /dev/null @@ -1,78 +0,0 @@ -''' -TestUniqueID -============ - -Tested platforms: - -* Windows -''' - -import unittest -from mock import patch, Mock -from plyer.tests.common import PlatformTest, platform_import - - -class TestUniqueID(unittest.TestCase): - ''' - TestCase for plyer.uniqueid. - ''' - - def test_uniqueid(self): - ''' - General all platform test for plyer.uniqueid. - ''' - from plyer import uniqueid - self.assertTrue(len(uniqueid.id) > 0) - - @PlatformTest('win') - def test_uniqueid_win(self): - ''' - Test Windows API for plyer.uniqueid. - ''' - try: - from winreg import ( - HKEY_LOCAL_MACHINE as HKLM, - KEY_READ as READ, KEY_WOW64_64KEY as VIEW - ) - except ImportError: - from _winreg import ( - HKEY_LOCAL_MACHINE as HKLM, - KEY_READ as READ, KEY_WOW64_64KEY as VIEW - ) - - # mock the 'regedit' alias for winreg, - # see if the import passes and get the instance - regedit_mod = 'plyer.platforms.win.uniqueid.regedit' - with patch(target=regedit_mod): - uniqueid_ = platform_import( - platform='win', - module_name='uniqueid' - ) - uniqueid = uniqueid_.instance() - self.assertIsInstance(uniqueid_.regedit, Mock) - - # out of mocking block, regedit should be a winreg module - self.assertIsInstance(uniqueid_.regedit, type(unittest)) - - # OpenKey is supposed to return a handle to registry key - regedit_opkey = 'plyer.platforms.win.uniqueid.regedit.OpenKey' - with patch(target=regedit_opkey, return_value='unicorn') as opkey: - - # QueryValueEx is supposed to return 2 packed values - # (key, type_id) - queryval = 'plyer.platforms.win.uniqueid.regedit.QueryValueEx' - retval = ('unique', None) - with patch(target=queryval, return_value=retval) as query: - uid = uniqueid.id - opkey.assert_called_once_with( - # key, subkey - HKLM, r'SOFTWARE\\Microsoft\\Cryptography', - # reserved integer (has to be 0 - zero), access mask - 0, READ | VIEW - ) - query.assert_called_once_with('unicorn', 'MachineGuid') - self.assertEqual(uid, retval[0]) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/tests/test_utils.py b/sbapp/plyer/tests/test_utils.py deleted file mode 100644 index 3532b76..0000000 --- a/sbapp/plyer/tests/test_utils.py +++ /dev/null @@ -1,417 +0,0 @@ -''' -TestUtils -========= - -Tested platforms: - -* Android -* iOS -* Windows -* MacOS -* Linux -''' - -import unittest -from mock import patch - - -class TestUtils(unittest.TestCase): - ''' - TestCase for plyer.utils. - ''' - - def cutter(self, part, string): - ''' - Cut off a part of a string if it contains a substring, - otherwise raise an error. 
- ''' - self.assertIn(part, string) - return string[len(part):] - - def test_deprecated_function(self): - ''' - Test printed out warning with @deprecated decorator - on a function without any arguments. - ''' - - from plyer.utils import deprecated - - @deprecated - def function(): - ''' - Dummy deprecated function. - ''' - return 1 - - with patch(target='warnings.warn') as stderr: - self.assertEqual(function(), 1) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function function', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_function().\n', args) - - args, _ = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated function. - ''', - )) - - def test_deprecated_function_arg(self): - ''' - Test printed out warning with @deprecated decorator - on a function with arguments. - ''' - - from plyer.utils import deprecated - - @deprecated - def function_with_arg(arg): - ''' - Dummy deprecated function with arg. - ''' - return arg - - with patch(target='warnings.warn') as stderr: - self.assertEqual(function_with_arg(1), 1) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function function_with_arg', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_function_arg().\n', args) - - args, _ = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated function with arg. - ''', - )) - - def test_deprecated_function_kwarg(self): - ''' - Test printed out warning with @deprecated decorator - on a function with keyword arguments. - ''' - - from plyer.utils import deprecated - - @deprecated - def function_with_kwarg(kwarg): - ''' - Dummy deprecated function with kwarg. - ''' - return kwarg - - with patch(target='warnings.warn') as stderr: - self.assertEqual(function_with_kwarg(kwarg=1), 1) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function function_with_kwarg', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_function_kwarg().\n', args) - - args, _ = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated function with kwarg. - ''', - )) - - def test_deprecated_class_method(self): - ''' - Test printed out warning with @deprecated decorator - on a instance bound method. - ''' - - from plyer.utils import deprecated - - class Class: - ''' - Dummy class with deprecated method method. - ''' - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - @deprecated - def method(self): - ''' - Dummy deprecated method. 
- ''' - return (self.args, self.kwargs) - - with patch(target='warnings.warn') as stderr: - args = (1, 2, 3) - kwargs = dict(x=1, y=2) - - cls = Class(*args, **kwargs) - self.assertEqual(cls.method(), (args, kwargs)) - - args, kwargs = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function method', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_class_method().\n', args) - - args, kwargs = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated method. - ''', - )) - - def test_deprecated_class_static_none(self): - ''' - Test printed out warning with @deprecated decorator - on a static method without arguments. - ''' - - from plyer.utils import deprecated - - class Class: - ''' - Dummy class with deprecated static method. - ''' - args = None - kwargs = None - - def __init__(self, *args, **kwargs): - Class.args = args - Class.kwargs = kwargs - - @staticmethod - @deprecated - def static(): - ''' - Dummy deprecated static method. - ''' - return (Class.args, Class.kwargs) - - with patch(target='warnings.warn') as stderr: - self.assertEqual(Class.static(), (None, None)) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function static', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter( - 'by test_deprecated_class_static_none().\n', args - ) - - args, _ = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated static method. - ''', - )) - - def test_deprecated_class_static_argskwargs(self): - ''' - Test printed out warning with @deprecated decorator - on a static method with arguments and keyword argument. - ''' - - from plyer.utils import deprecated - - class Class: - ''' - Dummy class with deprecated static method. - ''' - args = None - kwargs = None - - def __init__(self, *args, **kwargs): - Class.args = args - Class.kwargs = kwargs - - @staticmethod - @deprecated - def static(): - ''' - Dummy deprecated static method. - ''' - return (Class.args, Class.kwargs) - - with patch(target='warnings.warn') as stderr: - args = (1, 2, 3) - kwargs = dict(x=1, y=2) - - cls = Class(*args, **kwargs) - self.assertEqual(cls.static(), (args, kwargs)) - - args, kwargs = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function static', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter( - 'by test_deprecated_class_static_argskwargs().\n', args - ) - - args, kwargs = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated static method. - ''', - )) - - def test_deprecated_class_clsmethod(self): - ''' - Test printed out warning with @deprecated decorator - on a class bound method. - ''' - - from plyer.utils import deprecated - - class Class: - ''' - Dummy class with deprecated class method. - ''' - args = None - kwargs = None - - @classmethod - @deprecated - def clsmethod(cls): - ''' - Dummy deprecated class method. 
- ''' - return (cls.args, cls.kwargs) - - with patch(target='warnings.warn') as stderr: - self.assertEqual(Class.clsmethod(), (None, None)) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('deprecated function clsmethod', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_class_clsmethod().\n', args) - - args, _ = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated class method. - ''', - )) - - def test_deprecated_class(self): - ''' - Test printed out warning with @deprecated decorator on a class. - ''' - - from plyer.utils import deprecated - - @deprecated - class Class: - ''' - Dummy deprecated class. - ''' - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - with patch(target='warnings.warn') as stderr: - args = (1, 2, 3) - kwargs = dict(x=1, y=2) - - cls = Class(*args, **kwargs) - self.assertIsInstance(cls, Class) - self.assertEqual(cls.args, args) - self.assertEqual(cls.kwargs, kwargs) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('Creating an instance', args) - args = self.cutter('deprecated class Class in', args) - args = self.cutter(__name__, args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_class().\n', args) - - args, kwargs = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated class. - ''', - )) - - def test_deprecated_class_inherited(self): - ''' - Test printed out warning with @deprecated decorator on a class - which inherits from a deprecated class. - ''' - - from plyer.utils import deprecated - - @deprecated - class Class: - ''' - Dummy deprecated class. - ''' - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - class Inherited(Class): - ''' - Dummy class inheriting from a dummy deprecated class. - ''' - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.args = args - self.kwargs = kwargs - - with patch(target='warnings.warn') as stderr: - args = (1, 2, 3) - kwargs = dict(x=1, y=2) - - cls = Inherited(*args, **kwargs) - self.assertIsInstance(cls, Inherited) - self.assertEqual(cls.args, args) - self.assertEqual(cls.kwargs, kwargs) - - args, _ = stderr.call_args_list[0] - args = args[0] - args = self.cutter('[WARNING] ', args) - args = self.cutter('Creating an instance', args) - args = self.cutter('deprecated class Class in', args) - args = self.cutter(__name__, args) - args = self.cutter('Called from', args) - args = self.cutter('test_utils.py', args) - args = self.cutter('by test_deprecated_class_inherited().\n', args) - - args, kwargs = stderr.call_args_list[1] - self.assertEqual(args, ( - ''' - Dummy deprecated class. 
- ''', - )) - - -if __name__ == '__main__': - unittest.main() diff --git a/sbapp/plyer/utils.py b/sbapp/plyer/utils.py index 101763e..573075d 100644 --- a/sbapp/plyer/utils.py +++ b/sbapp/plyer/utils.py @@ -9,6 +9,7 @@ from os import environ from os import path from sys import platform as _sys_platform import sys +import RNS class Platform: @@ -91,8 +92,10 @@ class Proxy: # do the import try: name = object.__getattribute__(self, '_name') - module = 'plyer.platforms.{}.{}'.format( - platform, name) + if RNS.vendor.platformutils.is_android(): + module = 'plyer.platforms.{}.{}'.format(platform, name) + else: + module = 'sbapp.plyer.platforms.{}.{}'.format(platform, name) mod = __import__(module, fromlist='.') obj = mod.instance() except Exception: diff --git a/sbapp/pydub/__init__.py b/sbapp/pydub/__init__.py new file mode 100644 index 0000000..65e30b4 --- /dev/null +++ b/sbapp/pydub/__init__.py @@ -0,0 +1 @@ +from .audio_segment import AudioSegment \ No newline at end of file diff --git a/sbapp/pydub/audio_segment.py b/sbapp/pydub/audio_segment.py new file mode 100644 index 0000000..14ea46e --- /dev/null +++ b/sbapp/pydub/audio_segment.py @@ -0,0 +1,1399 @@ +from __future__ import division + +import array +import os +import subprocess +from tempfile import TemporaryFile, NamedTemporaryFile +import wave +import sys +import struct +from .logging_utils import log_conversion, log_subprocess_output +from .utils import mediainfo_json, fsdecode +import base64 +from collections import namedtuple + +try: + from StringIO import StringIO +except: + from io import StringIO + +from io import BytesIO + +try: + from itertools import izip +except: + izip = zip + +from .utils import ( + _fd_or_path_or_tempfile, + db_to_float, + ratio_to_db, + get_encoder_name, + get_array_type, + audioop, +) +from .exceptions import ( + TooManyMissingFrames, + InvalidDuration, + InvalidID3TagVersion, + InvalidTag, + CouldntDecodeError, + CouldntEncodeError, + MissingAudioParameter, +) + +if sys.version_info >= (3, 0): + basestring = str + xrange = range + StringIO = BytesIO + + +class ClassPropertyDescriptor(object): + + def __init__(self, fget, fset=None): + self.fget = fget + self.fset = fset + + def __get__(self, obj, klass=None): + if klass is None: + klass = type(obj) + return self.fget.__get__(obj, klass)() + + def __set__(self, obj, value): + if not self.fset: + raise AttributeError("can't set attribute") + type_ = type(obj) + return self.fset.__get__(obj, type_)(value) + + def setter(self, func): + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + self.fset = func + return self + + +def classproperty(func): + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + + return ClassPropertyDescriptor(func) + + +AUDIO_FILE_EXT_ALIASES = { + "m4a": "mp4", + "wave": "wav", +} + +WavSubChunk = namedtuple('WavSubChunk', ['id', 'position', 'size']) +WavData = namedtuple('WavData', ['audio_format', 'channels', 'sample_rate', + 'bits_per_sample', 'raw_data']) + + +def extract_wav_headers(data): + # def search_subchunk(data, subchunk_id): + pos = 12 # The size of the RIFF chunk descriptor + subchunks = [] + while pos + 8 <= len(data) and len(subchunks) < 10: + subchunk_id = data[pos:pos + 4] + subchunk_size = struct.unpack_from(' 2**32: + raise CouldntDecodeError("Unable to process >4GB files") + + # Set the file size in the RIFF chunk descriptor + data[4:8] = struct.pack(' b'\x7f'[0]]) + old_bytes = struct.pack(pack_fmt, b0, b1, b2) + byte_buffer.write(old_bytes) + + 
self._data = byte_buffer.getvalue() + self.sample_width = 4 + self.frame_width = self.channels * self.sample_width + + super(AudioSegment, self).__init__(*args, **kwargs) + + @property + def raw_data(self): + """ + public access to the raw audio data as a bytestring + """ + return self._data + + def get_array_of_samples(self, array_type_override=None): + """ + returns the raw_data as an array of samples + """ + if array_type_override is None: + array_type_override = self.array_type + return array.array(array_type_override, self._data) + + @property + def array_type(self): + return get_array_type(self.sample_width * 8) + + def __len__(self): + """ + returns the length of this audio segment in milliseconds + """ + return round(1000 * (self.frame_count() / self.frame_rate)) + + def __eq__(self, other): + try: + return self._data == other._data + except: + return False + + def __hash__(self): + return hash(AudioSegment) ^ hash((self.channels, self.frame_rate, self.sample_width, self._data)) + + def __ne__(self, other): + return not (self == other) + + def __iter__(self): + return (self[i] for i in xrange(len(self))) + + def __getitem__(self, millisecond): + if isinstance(millisecond, slice): + if millisecond.step: + return ( + self[i:i + millisecond.step] + for i in xrange(*millisecond.indices(len(self))) + ) + + start = millisecond.start if millisecond.start is not None else 0 + end = millisecond.stop if millisecond.stop is not None \ + else len(self) + + start = min(start, len(self)) + end = min(end, len(self)) + else: + start = millisecond + end = millisecond + 1 + + start = self._parse_position(start) * self.frame_width + end = self._parse_position(end) * self.frame_width + data = self._data[start:end] + + # ensure the output is as long as the requester is expecting + expected_length = end - start + missing_frames = (expected_length - len(data)) // self.frame_width + if missing_frames: + if missing_frames > self.frame_count(ms=2): + raise TooManyMissingFrames( + "You should never be filling in " + " more than 2 ms with silence here, " + "missing frames: %s" % missing_frames) + silence = audioop.mul(data[:self.frame_width], + self.sample_width, 0) + data += (silence * missing_frames) + + return self._spawn(data) + + def get_sample_slice(self, start_sample=None, end_sample=None): + """ + Get a section of the audio segment by sample index. + + NOTE: Negative indices do *not* address samples backword + from the end of the audio segment like a python list. + This is intentional. + """ + max_val = int(self.frame_count()) + + def bounded(val, default): + if val is None: + return default + if val < 0: + return 0 + if val > max_val: + return max_val + return val + + start_i = bounded(start_sample, 0) * self.frame_width + end_i = bounded(end_sample, max_val) * self.frame_width + + data = self._data[start_i:end_i] + return self._spawn(data) + + def __add__(self, arg): + if isinstance(arg, AudioSegment): + return self.append(arg, crossfade=0) + else: + return self.apply_gain(arg) + + def __radd__(self, rarg): + """ + Permit use of sum() builtin with an iterable of AudioSegments + """ + if rarg == 0: + return self + raise TypeError("Gains must be the second addend after the " + "AudioSegment") + + def __sub__(self, arg): + if isinstance(arg, AudioSegment): + raise TypeError("AudioSegment objects can't be subtracted from " + "each other") + else: + return self.apply_gain(-arg) + + def __mul__(self, arg): + """ + If the argument is an AudioSegment, overlay the multiplied audio + segment. 
+ + If it's a number, just use the string multiply operation to repeat the + audio. + + The following would return an AudioSegment that contains the + audio of audio_seg eight times + + `audio_seg * 8` + """ + if isinstance(arg, AudioSegment): + return self.overlay(arg, position=0, loop=True) + else: + return self._spawn(data=self._data * arg) + + def _spawn(self, data, overrides={}): + """ + Creates a new audio segment using the metadata from the current one + and the data passed in. Should be used whenever an AudioSegment is + being returned by an operation that would alters the current one, + since AudioSegment objects are immutable. + """ + # accept lists of data chunks + if isinstance(data, list): + data = b''.join(data) + + if isinstance(data, array.array): + try: + data = data.tobytes() + except: + data = data.tostring() + + # accept file-like objects + if hasattr(data, 'read'): + if hasattr(data, 'seek'): + data.seek(0) + data = data.read() + + metadata = { + 'sample_width': self.sample_width, + 'frame_rate': self.frame_rate, + 'frame_width': self.frame_width, + 'channels': self.channels + } + metadata.update(overrides) + return self.__class__(data=data, metadata=metadata) + + @classmethod + def _sync(cls, *segs): + channels = max(seg.channels for seg in segs) + frame_rate = max(seg.frame_rate for seg in segs) + sample_width = max(seg.sample_width for seg in segs) + + return tuple( + seg.set_channels(channels).set_frame_rate(frame_rate).set_sample_width(sample_width) + for seg in segs + ) + + def _parse_position(self, val): + if val < 0: + val = len(self) - abs(val) + val = self.frame_count(ms=len(self)) if val == float("inf") else \ + self.frame_count(ms=val) + return int(val) + + @classmethod + def empty(cls): + return cls(b'', metadata={ + "channels": 1, + "sample_width": 1, + "frame_rate": 1, + "frame_width": 1 + }) + + @classmethod + def silent(cls, duration=1000, frame_rate=11025): + """ + Generate a silent audio segment. + duration specified in milliseconds (default duration: 1000ms, default frame_rate: 11025). 
+ """ + frames = int(frame_rate * (duration / 1000.0)) + data = b"\0\0" * frames + return cls(data, metadata={"channels": 1, + "sample_width": 2, + "frame_rate": frame_rate, + "frame_width": 2}) + + @classmethod + def from_mono_audiosegments(cls, *mono_segments): + if not len(mono_segments): + raise ValueError("At least one AudioSegment instance is required") + + segs = cls._sync(*mono_segments) + + if segs[0].channels != 1: + raise ValueError( + "AudioSegment.from_mono_audiosegments requires all arguments are mono AudioSegment instances") + + channels = len(segs) + sample_width = segs[0].sample_width + frame_rate = segs[0].frame_rate + + frame_count = max(int(seg.frame_count()) for seg in segs) + data = array.array( + segs[0].array_type, + b'\0' * (frame_count * sample_width * channels) + ) + + for i, seg in enumerate(segs): + data[i::channels] = seg.get_array_of_samples() + + return cls( + data, + channels=channels, + sample_width=sample_width, + frame_rate=frame_rate, + ) + + @classmethod + def from_file_using_temporary_files(cls, file, format=None, codec=None, parameters=None, start_second=None, duration=None, **kwargs): + orig_file = file + file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False) + + if format: + format = format.lower() + format = AUDIO_FILE_EXT_ALIASES.get(format, format) + + def is_format(f): + f = f.lower() + if format == f: + return True + if isinstance(orig_file, basestring): + return orig_file.lower().endswith(".{0}".format(f)) + if isinstance(orig_file, bytes): + return orig_file.lower().endswith((".{0}".format(f)).encode('utf8')) + return False + + if is_format("wav"): + try: + obj = cls._from_safe_wav(file) + if close_file: + file.close() + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[start_second*1000:] + elif start_second is None and duration is not None: + return obj[:duration*1000] + else: + return obj[start_second*1000:(start_second+duration)*1000] + except: + file.seek(0) + elif is_format("raw") or is_format("pcm"): + sample_width = kwargs['sample_width'] + frame_rate = kwargs['frame_rate'] + channels = kwargs['channels'] + metadata = { + 'sample_width': sample_width, + 'frame_rate': frame_rate, + 'channels': channels, + 'frame_width': channels * sample_width + } + obj = cls(data=file.read(), metadata=metadata) + if close_file: + file.close() + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[start_second * 1000:] + elif start_second is None and duration is not None: + return obj[:duration * 1000] + else: + return obj[start_second * 1000:(start_second + duration) * 1000] + + input_file = NamedTemporaryFile(mode='wb', delete=False) + try: + input_file.write(file.read()) + except(OSError): + input_file.flush() + input_file.close() + input_file = NamedTemporaryFile(mode='wb', delete=False, buffering=2 ** 31 - 1) + if close_file: + file.close() + close_file = True + file = open(orig_file, buffering=2 ** 13 - 1, mode='rb') + reader = file.read(2 ** 31 - 1) + while reader: + input_file.write(reader) + reader = file.read(2 ** 31 - 1) + input_file.flush() + if close_file: + file.close() + + output = NamedTemporaryFile(mode="rb", delete=False) + + conversion_command = [cls.converter, + '-y', # always overwrite existing files + ] + + # If format is not defined + # ffmpeg/avconv will detect it automatically + if format: + conversion_command += ["-f", format] + + if codec: + # force audio decoder 
+ conversion_command += ["-acodec", codec] + + conversion_command += [ + "-i", input_file.name, # input_file options (filename last) + "-vn", # Drop any video streams if there are any + "-f", "wav" # output options (filename last) + ] + + if start_second is not None: + conversion_command += ["-ss", str(start_second)] + + if duration is not None: + conversion_command += ["-t", str(duration)] + + conversion_command += [output.name] + + if parameters is not None: + # extend arguments with arbitrary set + conversion_command.extend(parameters) + + log_conversion(conversion_command) + + with open(os.devnull, 'rb') as devnull: + p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p_out, p_err = p.communicate() + + log_subprocess_output(p_out) + log_subprocess_output(p_err) + + try: + if p.returncode != 0: + raise CouldntDecodeError( + "Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format( + p.returncode, p_err.decode(errors='ignore') )) + obj = cls._from_safe_wav(output) + finally: + input_file.close() + output.close() + os.unlink(input_file.name) + os.unlink(output.name) + + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[0:] + elif start_second is None and duration is not None: + return obj[:duration * 1000] + else: + return obj[0:duration * 1000] + + + @classmethod + def from_file(cls, file, format=None, codec=None, parameters=None, start_second=None, duration=None, **kwargs): + orig_file = file + try: + filename = fsdecode(file) + except TypeError: + filename = None + file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False) + + if format: + format = format.lower() + format = AUDIO_FILE_EXT_ALIASES.get(format, format) + + def is_format(f): + f = f.lower() + if format == f: + return True + + if filename: + return filename.lower().endswith(".{0}".format(f)) + + return False + + if is_format("wav"): + try: + if start_second is None and duration is None: + return cls._from_safe_wav(file) + elif start_second is not None and duration is None: + return cls._from_safe_wav(file)[start_second*1000:] + elif start_second is None and duration is not None: + return cls._from_safe_wav(file)[:duration*1000] + else: + return cls._from_safe_wav(file)[start_second*1000:(start_second+duration)*1000] + except: + file.seek(0) + elif is_format("raw") or is_format("pcm"): + sample_width = kwargs['sample_width'] + frame_rate = kwargs['frame_rate'] + channels = kwargs['channels'] + metadata = { + 'sample_width': sample_width, + 'frame_rate': frame_rate, + 'channels': channels, + 'frame_width': channels * sample_width + } + if start_second is None and duration is None: + return cls(data=file.read(), metadata=metadata) + elif start_second is not None and duration is None: + return cls(data=file.read(), metadata=metadata)[start_second*1000:] + elif start_second is None and duration is not None: + return cls(data=file.read(), metadata=metadata)[:duration*1000] + else: + return cls(data=file.read(), metadata=metadata)[start_second*1000:(start_second+duration)*1000] + + conversion_command = [cls.converter, + '-y', # always overwrite existing files + ] + + # If format is not defined + # ffmpeg/avconv will detect it automatically + if format: + conversion_command += ["-f", format] + + if codec: + # force audio decoder + conversion_command += ["-acodec", codec] + + read_ahead_limit = kwargs.get('read_ahead_limit', -1) + if filename: + 
conversion_command += ["-i", filename] + stdin_parameter = None + stdin_data = None + else: + if cls.converter == 'ffmpeg': + conversion_command += ["-read_ahead_limit", str(read_ahead_limit), + "-i", "cache:pipe:0"] + else: + conversion_command += ["-i", "-"] + stdin_parameter = subprocess.PIPE + stdin_data = file.read() + + if codec: + info = None + else: + info = mediainfo_json(orig_file, read_ahead_limit=read_ahead_limit) + if info: + audio_streams = [x for x in info['streams'] + if x['codec_type'] == 'audio'] + # This is a workaround for some ffprobe versions that always say + # that mp3/mp4/aac/webm/ogg files contain fltp samples + audio_codec = audio_streams[0].get('codec_name') + if (audio_streams[0].get('sample_fmt') == 'fltp' and + audio_codec in ['mp3', 'mp4', 'aac', 'webm', 'ogg']): + bits_per_sample = 16 + else: + bits_per_sample = audio_streams[0]['bits_per_sample'] + if bits_per_sample == 8: + acodec = 'pcm_u8' + else: + acodec = 'pcm_s%dle' % bits_per_sample + + conversion_command += ["-acodec", acodec] + + conversion_command += [ + "-vn", # Drop any video streams if there are any + "-f", "wav" # output options (filename last) + ] + + if start_second is not None: + conversion_command += ["-ss", str(start_second)] + + if duration is not None: + conversion_command += ["-t", str(duration)] + + conversion_command += ["-"] + + if parameters is not None: + # extend arguments with arbitrary set + conversion_command.extend(parameters) + + log_conversion(conversion_command) + + p = subprocess.Popen(conversion_command, stdin=stdin_parameter, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p_out, p_err = p.communicate(input=stdin_data) + + if p.returncode != 0 or len(p_out) == 0: + if close_file: + file.close() + raise CouldntDecodeError( + "Decoding failed. ffmpeg returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format( + p.returncode, p_err.decode(errors='ignore') )) + + p_out = bytearray(p_out) + fix_wav_headers(p_out) + p_out = bytes(p_out) + obj = cls(p_out) + + if close_file: + file.close() + + if start_second is None and duration is None: + return obj + elif start_second is not None and duration is None: + return obj[0:] + elif start_second is None and duration is not None: + return obj[:duration * 1000] + else: + return obj[0:duration * 1000] + + @classmethod + def from_mp3(cls, file, parameters=None): + return cls.from_file(file, 'mp3', parameters=parameters) + + @classmethod + def from_flv(cls, file, parameters=None): + return cls.from_file(file, 'flv', parameters=parameters) + + @classmethod + def from_ogg(cls, file, parameters=None): + return cls.from_file(file, 'ogg', parameters=parameters) + + @classmethod + def from_wav(cls, file, parameters=None): + return cls.from_file(file, 'wav', parameters=parameters) + + @classmethod + def from_raw(cls, file, **kwargs): + return cls.from_file(file, 'raw', sample_width=kwargs['sample_width'], frame_rate=kwargs['frame_rate'], + channels=kwargs['channels']) + + @classmethod + def _from_safe_wav(cls, file): + file, close_file = _fd_or_path_or_tempfile(file, 'rb', tempfile=False) + file.seek(0) + obj = cls(data=file) + if close_file: + file.close() + return obj + + def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4', + cover=None): + """ + Export an AudioSegment to a file with given options + + out_f (string): + Path to destination audio file. Also accepts os.PathLike objects on + python >= 3.6 + + format (string) + Format for destination audio file. 
+ ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files) + + codec (string) + Codec used to encode the destination file. + + bitrate (string) + Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...) + Each codec accepts different bitrate arguments so take a look at the + ffmpeg documentation for details (bitrate usually shown as -b, -ba or + -a:b). + + parameters (list of strings) + Aditional ffmpeg/avconv parameters + + tags (dict) + Set metadata information to destination files + usually used as tags. ({title='Song Title', artist='Song Artist'}) + + id3v2_version (string) + Set ID3v2 version for tags. (default: '4') + + cover (file) + Set cover for audio file from image file. (png or jpg) + """ + id3v2_allowed_versions = ['3', '4'] + + if format == "raw" and (codec is not None or parameters is not None): + raise AttributeError( + 'Can not invoke ffmpeg when export format is "raw"; ' + 'specify an ffmpeg raw format like format="s16le" instead ' + 'or call export(format="raw") with no codec or parameters') + + out_f, _ = _fd_or_path_or_tempfile(out_f, 'wb+') + out_f.seek(0) + + if format == "raw": + out_f.write(self._data) + out_f.seek(0) + return out_f + + # wav with no ffmpeg parameters can just be written directly to out_f + easy_wav = format == "wav" and codec is None and parameters is None + + if easy_wav: + data = out_f + else: + data = NamedTemporaryFile(mode="wb", delete=False) + + pcm_for_wav = self._data + if self.sample_width == 1: + # convert to unsigned integers for wav + pcm_for_wav = audioop.bias(self._data, 1, 128) + + wave_data = wave.open(data, 'wb') + wave_data.setnchannels(self.channels) + wave_data.setsampwidth(self.sample_width) + wave_data.setframerate(self.frame_rate) + # For some reason packing the wave header struct with + # a float in python 2 doesn't throw an exception + wave_data.setnframes(int(self.frame_count())) + wave_data.writeframesraw(pcm_for_wav) + wave_data.close() + + # for easy wav files, we're done (wav data is written directly to out_f) + if easy_wav: + out_f.seek(0) + return out_f + + output = NamedTemporaryFile(mode="w+b", delete=False) + + # build converter command to export + conversion_command = [ + self.converter, + '-y', # always overwrite existing files + "-f", "wav", "-i", data.name, # input options (filename last) + ] + + if codec is None: + codec = self.DEFAULT_CODECS.get(format, None) + + if cover is not None: + if cover.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff')) and format == "mp3": + conversion_command.extend(["-i", cover, "-map", "0", "-map", "1", "-c:v", "mjpeg"]) + else: + raise AttributeError( + "Currently cover images are only supported by MP3 files. 
The allowed image formats are: .tif, .jpg, .bmp, .jpeg and .png.") + + if codec is not None: + # force audio encoder + conversion_command.extend(["-acodec", codec]) + + if bitrate is not None: + conversion_command.extend(["-b:a", bitrate]) + + if parameters is not None: + # extend arguments with arbitrary set + conversion_command.extend(parameters) + + if tags is not None: + if not isinstance(tags, dict): + raise InvalidTag("Tags must be a dictionary.") + else: + # Extend converter command with tags + # print(tags) + for key, value in tags.items(): + conversion_command.extend( + ['-metadata', '{0}={1}'.format(key, value)]) + + if format == 'mp3': + # set id3v2 tag version + if id3v2_version not in id3v2_allowed_versions: + raise InvalidID3TagVersion( + "id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions) + conversion_command.extend([ + "-id3v2_version", id3v2_version + ]) + + if sys.platform == 'darwin' and codec == 'mp3': + conversion_command.extend(["-write_xing", "0"]) + + conversion_command.extend([ + "-f", format, output.name, # output options (filename last) + ]) + + log_conversion(conversion_command) + + # read stdin / write stdout + with open(os.devnull, 'rb') as devnull: + p = subprocess.Popen(conversion_command, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p_out, p_err = p.communicate() + + log_subprocess_output(p_out) + log_subprocess_output(p_err) + + if p.returncode != 0: + raise CouldntEncodeError( + "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}\n\nOutput from ffmpeg/avlib:\n\n{2}".format( + p.returncode, conversion_command, p_err.decode(errors='ignore') )) + + output.seek(0) + out_f.write(output.read()) + + data.close() + output.close() + + os.unlink(data.name) + os.unlink(output.name) + + out_f.seek(0) + return out_f + + def get_frame(self, index): + frame_start = index * self.frame_width + frame_end = frame_start + self.frame_width + return self._data[frame_start:frame_end] + + def frame_count(self, ms=None): + """ + returns the number of frames for the given number of milliseconds, or + if not specified, the number of frames in the whole AudioSegment + """ + if ms is not None: + return ms * (self.frame_rate / 1000.0) + else: + return float(len(self._data) // self.frame_width) + + def set_sample_width(self, sample_width): + if sample_width == self.sample_width: + return self + + frame_width = self.channels * sample_width + + return self._spawn( + audioop.lin2lin(self._data, self.sample_width, sample_width), + overrides={'sample_width': sample_width, 'frame_width': frame_width} + ) + + def set_frame_rate(self, frame_rate): + if frame_rate == self.frame_rate: + return self + + if self._data: + converted, _ = audioop.ratecv(self._data, self.sample_width, + self.channels, self.frame_rate, + frame_rate, None) + else: + converted = self._data + + return self._spawn(data=converted, + overrides={'frame_rate': frame_rate}) + + def set_channels(self, channels): + if channels == self.channels: + return self + + if channels == 2 and self.channels == 1: + fn = audioop.tostereo + frame_width = self.frame_width * 2 + fac = 1 + converted = fn(self._data, self.sample_width, fac, fac) + elif channels == 1 and self.channels == 2: + fn = audioop.tomono + frame_width = self.frame_width // 2 + fac = 0.5 + converted = fn(self._data, self.sample_width, fac, fac) + elif channels == 1: + channels_data = [seg.get_array_of_samples() for seg in self.split_to_mono()] + frame_count = int(self.frame_count()) + converted = array.array( + 
channels_data[0].typecode, + b'\0' * (frame_count * self.sample_width) + ) + for raw_channel_data in channels_data: + for i in range(frame_count): + converted[i] += raw_channel_data[i] // self.channels + frame_width = self.frame_width // self.channels + elif self.channels == 1: + dup_channels = [self for iChannel in range(channels)] + return AudioSegment.from_mono_audiosegments(*dup_channels) + else: + raise ValueError( + "AudioSegment.set_channels only supports mono-to-multi channel and multi-to-mono channel conversion") + + return self._spawn(data=converted, + overrides={ + 'channels': channels, + 'frame_width': frame_width}) + + def split_to_mono(self): + if self.channels == 1: + return [self] + + samples = self.get_array_of_samples() + + mono_channels = [] + for i in range(self.channels): + samples_for_current_channel = samples[i::self.channels] + + try: + mono_data = samples_for_current_channel.tobytes() + except AttributeError: + mono_data = samples_for_current_channel.tostring() + + mono_channels.append( + self._spawn(mono_data, overrides={"channels": 1, "frame_width": self.sample_width}) + ) + + return mono_channels + + @property + def rms(self): + return audioop.rms(self._data, self.sample_width) + + @property + def dBFS(self): + rms = self.rms + if not rms: + return -float("infinity") + return ratio_to_db(self.rms / self.max_possible_amplitude) + + @property + def max(self): + return audioop.max(self._data, self.sample_width) + + @property + def max_possible_amplitude(self): + bits = self.sample_width * 8 + max_possible_val = (2 ** bits) + + # since half is above 0 and half is below the max amplitude is divided + return max_possible_val / 2 + + @property + def max_dBFS(self): + return ratio_to_db(self.max, self.max_possible_amplitude) + + @property + def duration_seconds(self): + return self.frame_rate and self.frame_count() / self.frame_rate or 0.0 + + def get_dc_offset(self, channel=1): + """ + Returns a value between -1.0 and 1.0 representing the DC offset of a + channel (1 for left, 2 for right). + """ + if not 1 <= channel <= 2: + raise ValueError("channel value must be 1 (left) or 2 (right)") + + if self.channels == 1: + data = self._data + elif channel == 1: + data = audioop.tomono(self._data, self.sample_width, 1, 0) + else: + data = audioop.tomono(self._data, self.sample_width, 0, 1) + + return float(audioop.avg(data, self.sample_width)) / self.max_possible_amplitude + + def remove_dc_offset(self, channel=None, offset=None): + """ + Removes DC offset of given channel. Calculates offset if it's not given. + Offset values must be in range -1.0 to 1.0. If channel is None, removes + DC offset from all available channels. 
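+
+        A minimal usage sketch (assuming `seg` is an already-loaded AudioSegment;
+        the variable names are illustrative only):
+
+            centered = seg.remove_dc_offset()            # all channels, offset measured automatically
+            left_fixed = seg.remove_dc_offset(channel=1) # left channel only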
+        """
+        if channel and not 1 <= channel <= 2:
+            raise ValueError("channel value must be None, 1 (left) or 2 (right)")
+
+        if offset and not -1.0 <= offset <= 1.0:
+            raise ValueError("offset value must be in range -1.0 to 1.0")
+
+        if offset:
+            offset = int(round(offset * self.max_possible_amplitude))
+
+        def remove_data_dc(data, off):
+            if not off:
+                off = audioop.avg(data, self.sample_width)
+            return audioop.bias(data, self.sample_width, -off)
+
+        if self.channels == 1:
+            return self._spawn(data=remove_data_dc(self._data, offset))
+
+        left_channel = audioop.tomono(self._data, self.sample_width, 1, 0)
+        right_channel = audioop.tomono(self._data, self.sample_width, 0, 1)
+
+        if not channel or channel == 1:
+            left_channel = remove_data_dc(left_channel, offset)
+
+        if not channel or channel == 2:
+            right_channel = remove_data_dc(right_channel, offset)
+
+        left_channel = audioop.tostereo(left_channel, self.sample_width, 1, 0)
+        right_channel = audioop.tostereo(right_channel, self.sample_width, 0, 1)
+
+        return self._spawn(data=audioop.add(left_channel, right_channel,
+                                            self.sample_width))
+
+    def apply_gain(self, volume_change):
+        return self._spawn(data=audioop.mul(self._data, self.sample_width,
+                                            db_to_float(float(volume_change))))
+
+    def overlay(self, seg, position=0, loop=False, times=None, gain_during_overlay=None):
+        """
+        Overlay the provided segment on to this segment starting at the
+        specified position and using the specified looping behavior.
+
+        seg (AudioSegment):
+            The audio segment to overlay on to this one.
+
+        position (optional int):
+            The position to start overlaying the provided segment into this
+            one.
+
+        loop (optional bool):
+            Loop seg as many times as necessary to match this segment's length.
+            Overrides the times param.
+
+        times (optional int):
+            Loop seg the specified number of times or until it matches this
+            segment's length. 1 means once, 2 means twice, ... 0 would make the
+            call a no-op.
+
+        gain_during_overlay (optional int):
+            Changes this segment's volume by the specified amount during the
+            duration of time that seg is overlaid on top of it. When negative,
+            this has the effect of 'ducking' the audio under the overlay.
+        """
+
+        if loop:
+            # match loop=True's behavior with the new times (count) mechanism.
+ times = -1 + elif times is None: + # no times specified, just once through + times = 1 + elif times == 0: + # it's a no-op, make a copy since we never mutate + return self._spawn(self._data) + + output = StringIO() + + seg1, seg2 = AudioSegment._sync(self, seg) + sample_width = seg1.sample_width + spawn = seg1._spawn + + output.write(seg1[:position]._data) + + # drop down to the raw data + seg1 = seg1[position:]._data + seg2 = seg2._data + pos = 0 + seg1_len = len(seg1) + seg2_len = len(seg2) + while times: + remaining = max(0, seg1_len - pos) + if seg2_len >= remaining: + seg2 = seg2[:remaining] + seg2_len = remaining + # we've hit the end, we're done looping (if we were) and this + # is our last go-around + times = 1 + + if gain_during_overlay: + seg1_overlaid = seg1[pos:pos + seg2_len] + seg1_adjusted_gain = audioop.mul(seg1_overlaid, self.sample_width, + db_to_float(float(gain_during_overlay))) + output.write(audioop.add(seg1_adjusted_gain, seg2, sample_width)) + else: + output.write(audioop.add(seg1[pos:pos + seg2_len], seg2, + sample_width)) + pos += seg2_len + + # dec times to break our while loop (eventually) + times -= 1 + + output.write(seg1[pos:]) + + return spawn(data=output) + + def append(self, seg, crossfade=100): + seg1, seg2 = AudioSegment._sync(self, seg) + + if not crossfade: + return seg1._spawn(seg1._data + seg2._data) + elif crossfade > len(self): + raise ValueError("Crossfade is longer than the original AudioSegment ({}ms > {}ms)".format( + crossfade, len(self) + )) + elif crossfade > len(seg): + raise ValueError("Crossfade is longer than the appended AudioSegment ({}ms > {}ms)".format( + crossfade, len(seg) + )) + + xf = seg1[-crossfade:].fade(to_gain=-120, start=0, end=float('inf')) + xf *= seg2[:crossfade].fade(from_gain=-120, start=0, end=float('inf')) + + output = TemporaryFile() + + output.write(seg1[:-crossfade]._data) + output.write(xf._data) + output.write(seg2[crossfade:]._data) + + output.seek(0) + obj = seg1._spawn(data=output) + output.close() + return obj + + def fade(self, to_gain=0, from_gain=0, start=None, end=None, + duration=None): + """ + Fade the volume of this audio segment. 
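+
+        A minimal usage sketch (assuming `song` is an already-loaded AudioSegment;
+        the variable names are illustrative only):
+
+            # fade from 0 dB down to -12 dB over the first two seconds
+            # (audio after the fade stays at -12 dB)
+            faded = song.fade(to_gain=-12.0, start=0, duration=2000)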
+
+        to_gain (float):
+            resulting volume_change in dB
+
+        from_gain (float):
+            initial volume_change in dB; audio before the fade is also
+            adjusted by this amount
+
+        start (int):
+            default = beginning of the segment
+            when in this segment to start fading in milliseconds
+
+        end (int):
+            default = end of the segment
+            when in this segment to stop fading in milliseconds
+
+        duration (int):
+            default = until the end of the audio segment
+            the duration of the fade
+        """
+        if None not in [duration, end, start]:
+            raise TypeError('Only two of the three arguments, "start", '
+                            '"end", and "duration" may be specified')
+
+        # no fade == the same audio
+        if to_gain == 0 and from_gain == 0:
+            return self
+
+        start = min(len(self), start) if start is not None else None
+        end = min(len(self), end) if end is not None else None
+
+        if start is not None and start < 0:
+            start += len(self)
+        if end is not None and end < 0:
+            end += len(self)
+
+        if duration is not None and duration < 0:
+            raise InvalidDuration("duration must be a positive integer")
+
+        if duration:
+            if start is not None:
+                end = start + duration
+            elif end is not None:
+                start = end - duration
+        else:
+            duration = end - start
+
+        from_power = db_to_float(from_gain)
+
+        output = []
+
+        # original data - up until the crossfade portion, as is
+        before_fade = self[:start]._data
+        if from_gain != 0:
+            before_fade = audioop.mul(before_fade,
+                                      self.sample_width,
+                                      from_power)
+        output.append(before_fade)
+
+        gain_delta = db_to_float(to_gain) - from_power
+
+        # fades longer than 100ms can use coarse fading (one gain step per ms),
+        # shorter fades will have audible clicks so they use precise fading
+        # (one gain step per sample)
+        if duration > 100:
+            scale_step = gain_delta / duration
+
+            for i in range(duration):
+                volume_change = from_power + (scale_step * i)
+                chunk = self[start + i]
+                chunk = audioop.mul(chunk._data,
+                                    self.sample_width,
+                                    volume_change)
+
+                output.append(chunk)
+        else:
+            start_frame = self.frame_count(ms=start)
+            end_frame = self.frame_count(ms=end)
+            fade_frames = end_frame - start_frame
+            scale_step = gain_delta / fade_frames
+
+            for i in range(int(fade_frames)):
+                volume_change = from_power + (scale_step * i)
+                sample = self.get_frame(int(start_frame + i))
+                sample = audioop.mul(sample, self.sample_width, volume_change)
+
+                output.append(sample)
+
+        # original data after the crossfade portion, at the new volume
+        after_fade = self[end:]._data
+        if to_gain != 0:
+            after_fade = audioop.mul(after_fade,
+                                     self.sample_width,
+                                     db_to_float(to_gain))
+        output.append(after_fade)
+
+        return self._spawn(data=output)
+
+    def fade_out(self, duration):
+        return self.fade(to_gain=-120, duration=duration, end=float('inf'))
+
+    def fade_in(self, duration):
+        return self.fade(from_gain=-120, duration=duration, start=0)
+
+    def reverse(self):
+        return self._spawn(
+            data=audioop.reverse(self._data, self.sample_width)
+        )
+
+    def _repr_html_(self):
+        # minimal HTML5 audio player for inline notebook display
+        src = """
+                  <audio controls>
+                    <source src="data:audio/mpeg;base64,{base64}" type="audio/mpeg"/>
+                    Your browser does not support the audio element.
+                  </audio>
+                """
+        fh = self.export()
+        data = base64.b64encode(fh.read()).decode('ascii')
+        return src.format(base64=data)
+
+
+from .
import effects diff --git a/sbapp/pydub/effects.py b/sbapp/pydub/effects.py new file mode 100644 index 0000000..0210521 --- /dev/null +++ b/sbapp/pydub/effects.py @@ -0,0 +1,341 @@ +import sys +import math +import array +from .utils import ( + db_to_float, + ratio_to_db, + register_pydub_effect, + make_chunks, + audioop, + get_min_max_value +) +from .silence import split_on_silence +from .exceptions import TooManyMissingFrames, InvalidDuration + +if sys.version_info >= (3, 0): + xrange = range + + +@register_pydub_effect +def apply_mono_filter_to_each_channel(seg, filter_fn): + n_channels = seg.channels + + channel_segs = seg.split_to_mono() + channel_segs = [filter_fn(channel_seg) for channel_seg in channel_segs] + + out_data = seg.get_array_of_samples() + for channel_i, channel_seg in enumerate(channel_segs): + for sample_i, sample in enumerate(channel_seg.get_array_of_samples()): + index = (sample_i * n_channels) + channel_i + out_data[index] = sample + + return seg._spawn(out_data) + + +@register_pydub_effect +def normalize(seg, headroom=0.1): + """ + headroom is how close to the maximum volume to boost the signal up to (specified in dB) + """ + peak_sample_val = seg.max + + # if the max is 0, this audio segment is silent, and can't be normalized + if peak_sample_val == 0: + return seg + + target_peak = seg.max_possible_amplitude * db_to_float(-headroom) + + needed_boost = ratio_to_db(target_peak / peak_sample_val) + return seg.apply_gain(needed_boost) + + +@register_pydub_effect +def speedup(seg, playback_speed=1.5, chunk_size=150, crossfade=25): + # we will keep audio in 150ms chunks since one waveform at 20Hz is 50ms long + # (20 Hz is the lowest frequency audible to humans) + + # portion of AUDIO TO KEEP. if playback speed is 1.25 we keep 80% (0.8) and + # discard 20% (0.2) + atk = 1.0 / playback_speed + + if playback_speed < 2.0: + # throwing out more than half the audio - keep 50ms chunks + ms_to_remove_per_chunk = int(chunk_size * (1 - atk) / atk) + else: + # throwing out less than half the audio - throw out 50ms chunks + ms_to_remove_per_chunk = int(chunk_size) + chunk_size = int(atk * chunk_size / (1 - atk)) + + # the crossfade cannot be longer than the amount of audio we're removing + crossfade = min(crossfade, ms_to_remove_per_chunk - 1) + + # DEBUG + #print("chunk: {0}, rm: {1}".format(chunk_size, ms_to_remove_per_chunk)) + + chunks = make_chunks(seg, chunk_size + ms_to_remove_per_chunk) + if len(chunks) < 2: + raise Exception("Could not speed up AudioSegment, it was too short {2:0.2f}s for the current settings:\n{0}ms chunks at {1:0.1f}x speedup".format( + chunk_size, playback_speed, seg.duration_seconds)) + + # we'll actually truncate a bit less than we calculated to make up for the + # crossfade between chunks + ms_to_remove_per_chunk -= crossfade + + # we don't want to truncate the last chunk since it is not guaranteed to be + # the full chunk length + last_chunk = chunks[-1] + chunks = [chunk[:-ms_to_remove_per_chunk] for chunk in chunks[:-1]] + + out = chunks[0] + for chunk in chunks[1:]: + out = out.append(chunk, crossfade=crossfade) + + out += last_chunk + return out + + +@register_pydub_effect +def strip_silence(seg, silence_len=1000, silence_thresh=-16, padding=100): + if padding > silence_len: + raise InvalidDuration("padding cannot be longer than silence_len") + + chunks = split_on_silence(seg, silence_len, silence_thresh, padding) + crossfade = padding / 2 + + if not len(chunks): + return seg[0:0] + + seg = chunks[0] + for chunk in chunks[1:]: + seg = 
seg.append(chunk, crossfade=crossfade) + + return seg + + +@register_pydub_effect +def compress_dynamic_range(seg, threshold=-20.0, ratio=4.0, attack=5.0, release=50.0): + """ + Keyword Arguments: + + threshold - default: -20.0 + Threshold in dBFS. default of -20.0 means -20dB relative to the + maximum possible volume. 0dBFS is the maximum possible value so + all values for this argument sould be negative. + + ratio - default: 4.0 + Compression ratio. Audio louder than the threshold will be + reduced to 1/ratio the volume. A ratio of 4.0 is equivalent to + a setting of 4:1 in a pro-audio compressor like the Waves C1. + + attack - default: 5.0 + Attack in milliseconds. How long it should take for the compressor + to kick in once the audio has exceeded the threshold. + + release - default: 50.0 + Release in milliseconds. How long it should take for the compressor + to stop compressing after the audio has falled below the threshold. + + + For an overview of Dynamic Range Compression, and more detailed explanation + of the related terminology, see: + + http://en.wikipedia.org/wiki/Dynamic_range_compression + """ + + thresh_rms = seg.max_possible_amplitude * db_to_float(threshold) + + look_frames = int(seg.frame_count(ms=attack)) + def rms_at(frame_i): + return seg.get_sample_slice(frame_i - look_frames, frame_i).rms + def db_over_threshold(rms): + if rms == 0: return 0.0 + db = ratio_to_db(rms / thresh_rms) + return max(db, 0) + + output = [] + + # amount to reduce the volume of the audio by (in dB) + attenuation = 0.0 + + attack_frames = seg.frame_count(ms=attack) + release_frames = seg.frame_count(ms=release) + for i in xrange(int(seg.frame_count())): + rms_now = rms_at(i) + + # with a ratio of 4.0 this means the volume will exceed the threshold by + # 1/4 the amount (of dB) that it would otherwise + max_attenuation = (1 - (1.0 / ratio)) * db_over_threshold(rms_now) + + attenuation_inc = max_attenuation / attack_frames + attenuation_dec = max_attenuation / release_frames + + if rms_now > thresh_rms and attenuation <= max_attenuation: + attenuation += attenuation_inc + attenuation = min(attenuation, max_attenuation) + else: + attenuation -= attenuation_dec + attenuation = max(attenuation, 0) + + frame = seg.get_frame(i) + if attenuation != 0.0: + frame = audioop.mul(frame, + seg.sample_width, + db_to_float(-attenuation)) + + output.append(frame) + + return seg._spawn(data=b''.join(output)) + + +# Invert the phase of the signal. + +@register_pydub_effect + +def invert_phase(seg, channels=(1, 1)): + """ + channels- specifies which channel (left or right) to reverse the phase of. + Note that mono AudioSegments will become stereo. 
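+
+    A minimal usage sketch (assuming `stereo_seg` is an already-loaded stereo
+    AudioSegment; the variable names are illustrative only):
+
+        flipped = stereo_seg.invert_phase()                       # invert both channels
+        left_flipped = stereo_seg.invert_phase(channels=(1, 0))   # invert the left channel only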
+ """ + if channels == (1, 1): + inverted = audioop.mul(seg._data, seg.sample_width, -1.0) + return seg._spawn(data=inverted) + + else: + if seg.channels == 2: + left, right = seg.split_to_mono() + else: + raise Exception("Can't implicitly convert an AudioSegment with " + str(seg.channels) + " channels to stereo.") + + if channels == (1, 0): + left = left.invert_phase() + else: + right = right.invert_phase() + + return seg.from_mono_audiosegments(left, right) + + + +# High and low pass filters based on implementation found on Stack Overflow: +# http://stackoverflow.com/questions/13882038/implementing-simple-high-and-low-pass-filters-in-c + +@register_pydub_effect +def low_pass_filter(seg, cutoff): + """ + cutoff - Frequency (in Hz) where higher frequency signal will begin to + be reduced by 6dB per octave (doubling in frequency) above this point + """ + RC = 1.0 / (cutoff * 2 * math.pi) + dt = 1.0 / seg.frame_rate + + alpha = dt / (RC + dt) + + original = seg.get_array_of_samples() + filteredArray = array.array(seg.array_type, original) + + frame_count = int(seg.frame_count()) + + last_val = [0] * seg.channels + for i in range(seg.channels): + last_val[i] = filteredArray[i] = original[i] + + for i in range(1, frame_count): + for j in range(seg.channels): + offset = (i * seg.channels) + j + last_val[j] = last_val[j] + (alpha * (original[offset] - last_val[j])) + filteredArray[offset] = int(last_val[j]) + + return seg._spawn(data=filteredArray) + + +@register_pydub_effect +def high_pass_filter(seg, cutoff): + """ + cutoff - Frequency (in Hz) where lower frequency signal will begin to + be reduced by 6dB per octave (doubling in frequency) below this point + """ + RC = 1.0 / (cutoff * 2 * math.pi) + dt = 1.0 / seg.frame_rate + + alpha = RC / (RC + dt) + + minval, maxval = get_min_max_value(seg.sample_width * 8) + + original = seg.get_array_of_samples() + filteredArray = array.array(seg.array_type, original) + + frame_count = int(seg.frame_count()) + + last_val = [0] * seg.channels + for i in range(seg.channels): + last_val[i] = filteredArray[i] = original[i] + + for i in range(1, frame_count): + for j in range(seg.channels): + offset = (i * seg.channels) + j + offset_minus_1 = ((i-1) * seg.channels) + j + + last_val[j] = alpha * (last_val[j] + original[offset] - original[offset_minus_1]) + filteredArray[offset] = int(min(max(last_val[j], minval), maxval)) + + return seg._spawn(data=filteredArray) + + +@register_pydub_effect +def pan(seg, pan_amount): + """ + pan_amount should be between -1.0 (100% left) and +1.0 (100% right) + + When pan_amount == 0.0 the left/right balance is not changed. + + Panning does not alter the *perceived* loundness, but since loudness + is decreasing on one side, the other side needs to get louder to + compensate. When panned hard left, the left channel will be 3dB louder. + """ + if not -1.0 <= pan_amount <= 1.0: + raise ValueError("pan_amount should be between -1.0 (100% left) and +1.0 (100% right)") + + max_boost_db = ratio_to_db(2.0) + boost_db = abs(pan_amount) * max_boost_db + + boost_factor = db_to_float(boost_db) + reduce_factor = db_to_float(max_boost_db) - boost_factor + + reduce_db = ratio_to_db(reduce_factor) + + # Cut boost in half (max boost== 3dB) - in reality 2 speakers + # do not sum to a full 6 dB. 
+ boost_db = boost_db / 2.0 + + if pan_amount < 0: + return seg.apply_gain_stereo(boost_db, reduce_db) + else: + return seg.apply_gain_stereo(reduce_db, boost_db) + + +@register_pydub_effect +def apply_gain_stereo(seg, left_gain=0.0, right_gain=0.0): + """ + left_gain - amount of gain to apply to the left channel (in dB) + right_gain - amount of gain to apply to the right channel (in dB) + + note: mono audio segments will be converted to stereo + """ + if seg.channels == 1: + left = right = seg + elif seg.channels == 2: + left, right = seg.split_to_mono() + + l_mult_factor = db_to_float(left_gain) + r_mult_factor = db_to_float(right_gain) + + left_data = audioop.mul(left._data, left.sample_width, l_mult_factor) + left_data = audioop.tostereo(left_data, left.sample_width, 1, 0) + + right_data = audioop.mul(right._data, right.sample_width, r_mult_factor) + right_data = audioop.tostereo(right_data, right.sample_width, 0, 1) + + output = audioop.add(left_data, right_data, seg.sample_width) + + return seg._spawn(data=output, + overrides={'channels': 2, + 'frame_width': 2 * seg.sample_width}) diff --git a/sbapp/pydub/exceptions.py b/sbapp/pydub/exceptions.py new file mode 100644 index 0000000..79d0743 --- /dev/null +++ b/sbapp/pydub/exceptions.py @@ -0,0 +1,32 @@ +class PydubException(Exception): + """ + Base class for any Pydub exception + """ + + +class TooManyMissingFrames(PydubException): + pass + + +class InvalidDuration(PydubException): + pass + + +class InvalidTag(PydubException): + pass + + +class InvalidID3TagVersion(PydubException): + pass + + +class CouldntDecodeError(PydubException): + pass + + +class CouldntEncodeError(PydubException): + pass + + +class MissingAudioParameter(PydubException): + pass diff --git a/sbapp/pydub/generators.py b/sbapp/pydub/generators.py new file mode 100644 index 0000000..b04cb4c --- /dev/null +++ b/sbapp/pydub/generators.py @@ -0,0 +1,142 @@ +""" +Each generator will return float samples from -1.0 to 1.0, which can be +converted to actual audio with 8, 16, 24, or 32 bit depth using the +SiganlGenerator.to_audio_segment() method (on any of it's subclasses). 
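+
+A minimal usage sketch (assuming this vendored copy is importable as
+`sbapp.pydub`; the variable names are illustrative only):
+
+    from sbapp.pydub.generators import Sine, WhiteNoise
+    tone = Sine(440).to_audio_segment(duration=1000, volume=-3.0)
+    noise = WhiteNoise().to_audio_segment(duration=1000, volume=-20.0)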
+ +See Wikipedia's "waveform" page for info on some of the generators included +here: http://en.wikipedia.org/wiki/Waveform +""" + +import math +import array +import itertools +import random +from .audio_segment import AudioSegment +from .utils import ( + db_to_float, + get_frame_width, + get_array_type, + get_min_max_value +) + + + +class SignalGenerator(object): + def __init__(self, sample_rate=44100, bit_depth=16): + self.sample_rate = sample_rate + self.bit_depth = bit_depth + + def to_audio_segment(self, duration=1000.0, volume=0.0): + """ + Duration in milliseconds + (default: 1 second) + Volume in DB relative to maximum amplitude + (default 0.0 dBFS, which is the maximum value) + """ + minval, maxval = get_min_max_value(self.bit_depth) + sample_width = get_frame_width(self.bit_depth) + array_type = get_array_type(self.bit_depth) + + gain = db_to_float(volume) + sample_count = int(self.sample_rate * (duration / 1000.0)) + + sample_data = (int(val * maxval * gain) for val in self.generate()) + sample_data = itertools.islice(sample_data, 0, sample_count) + + data = array.array(array_type, sample_data) + + try: + data = data.tobytes() + except: + data = data.tostring() + + return AudioSegment(data=data, metadata={ + "channels": 1, + "sample_width": sample_width, + "frame_rate": self.sample_rate, + "frame_width": sample_width, + }) + + def generate(self): + raise NotImplementedError("SignalGenerator subclasses must implement the generate() method, and *should not* call the superclass implementation.") + + + +class Sine(SignalGenerator): + def __init__(self, freq, **kwargs): + super(Sine, self).__init__(**kwargs) + self.freq = freq + + def generate(self): + sine_of = (self.freq * 2 * math.pi) / self.sample_rate + sample_n = 0 + while True: + yield math.sin(sine_of * sample_n) + sample_n += 1 + + + +class Pulse(SignalGenerator): + def __init__(self, freq, duty_cycle=0.5, **kwargs): + super(Pulse, self).__init__(**kwargs) + self.freq = freq + self.duty_cycle = duty_cycle + + def generate(self): + sample_n = 0 + + # in samples + cycle_length = self.sample_rate / float(self.freq) + pulse_length = cycle_length * self.duty_cycle + + while True: + if (sample_n % cycle_length) < pulse_length: + yield 1.0 + else: + yield -1.0 + sample_n += 1 + + + +class Square(Pulse): + def __init__(self, freq, **kwargs): + kwargs['duty_cycle'] = 0.5 + super(Square, self).__init__(freq, **kwargs) + + + +class Sawtooth(SignalGenerator): + def __init__(self, freq, duty_cycle=1.0, **kwargs): + super(Sawtooth, self).__init__(**kwargs) + self.freq = freq + self.duty_cycle = duty_cycle + + def generate(self): + sample_n = 0 + + # in samples + cycle_length = self.sample_rate / float(self.freq) + midpoint = cycle_length * self.duty_cycle + ascend_length = midpoint + descend_length = cycle_length - ascend_length + + while True: + cycle_position = sample_n % cycle_length + if cycle_position < midpoint: + yield (2 * cycle_position / ascend_length) - 1.0 + else: + yield 1.0 - (2 * (cycle_position - midpoint) / descend_length) + sample_n += 1 + + + +class Triangle(Sawtooth): + def __init__(self, freq, **kwargs): + kwargs['duty_cycle'] = 0.5 + super(Triangle, self).__init__(freq, **kwargs) + + +class WhiteNoise(SignalGenerator): + def generate(self): + while True: + yield (random.random() * 2) - 1.0 diff --git a/sbapp/pydub/logging_utils.py b/sbapp/pydub/logging_utils.py new file mode 100644 index 0000000..a312bd2 --- /dev/null +++ b/sbapp/pydub/logging_utils.py @@ -0,0 +1,14 @@ +""" + +""" +import logging + +converter_logger 
= logging.getLogger("pydub.converter") + +def log_conversion(conversion_command): + converter_logger.debug("subprocess.call(%s)", repr(conversion_command)) + +def log_subprocess_output(output): + if output: + for line in output.rstrip().splitlines(): + converter_logger.debug('subprocess output: %s', line.rstrip()) diff --git a/sbapp/pydub/playback.py b/sbapp/pydub/playback.py new file mode 100644 index 0000000..72ce4a5 --- /dev/null +++ b/sbapp/pydub/playback.py @@ -0,0 +1,71 @@ +""" +Support for playing AudioSegments. Pyaudio will be used if it's installed, +otherwise will fallback to ffplay. Pyaudio is a *much* nicer solution, but +is tricky to install. See my notes on installing pyaudio in a virtualenv (on +OSX 10.10): https://gist.github.com/jiaaro/9767512210a1d80a8a0d +""" + +import subprocess +from tempfile import NamedTemporaryFile +from .utils import get_player_name, make_chunks + +def _play_with_ffplay(seg): + PLAYER = get_player_name() + with NamedTemporaryFile("w+b", suffix=".wav") as f: + seg.export(f.name, "wav") + subprocess.call([PLAYER, "-nodisp", "-autoexit", "-hide_banner", f.name]) + + +def _play_with_pyaudio(seg): + import pyaudio + + p = pyaudio.PyAudio() + stream = p.open(format=p.get_format_from_width(seg.sample_width), + channels=seg.channels, + rate=seg.frame_rate, + output=True) + + # Just in case there were any exceptions/interrupts, we release the resource + # So as not to raise OSError: Device Unavailable should play() be used again + try: + # break audio into half-second chunks (to allows keyboard interrupts) + for chunk in make_chunks(seg, 500): + stream.write(chunk._data) + finally: + stream.stop_stream() + stream.close() + + p.terminate() + + +def _play_with_simpleaudio(seg): + import simpleaudio + return simpleaudio.play_buffer( + seg.raw_data, + num_channels=seg.channels, + bytes_per_sample=seg.sample_width, + sample_rate=seg.frame_rate + ) + + +def play(audio_segment): + try: + playback = _play_with_simpleaudio(audio_segment) + try: + playback.wait_done() + except KeyboardInterrupt: + playback.stop() + except ImportError: + pass + else: + return + + try: + _play_with_pyaudio(audio_segment) + return + except ImportError: + pass + else: + return + + _play_with_ffplay(audio_segment) diff --git a/sbapp/pydub/pyaudioop.py b/sbapp/pydub/pyaudioop.py new file mode 100644 index 0000000..9b1e2fb --- /dev/null +++ b/sbapp/pydub/pyaudioop.py @@ -0,0 +1,553 @@ +try: + from __builtin__ import max as builtin_max + from __builtin__ import min as builtin_min +except ImportError: + from builtins import max as builtin_max + from builtins import min as builtin_min +import math +import struct +try: + from fractions import gcd +except ImportError: # Python 3.9+ + from math import gcd +from ctypes import create_string_buffer + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = 
start + size + return struct.unpack_from(fmt, buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: + return 0 + elif size == 1: + return -0x80 + elif size == 2: + return -0x8000 + elif size == 4: + return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: builtin_max(min(val, maxval), minval) + + +def _overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval <= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return builtin_max(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + max_sample = builtin_max(sample, max_sample) + min_sample = builtin_min(sample, min_sample) + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + total = 0 + for i in range(length): + total += getsample(cp1, size, i) * getsample(cp2, size, i) + return total + + +def findfit(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + 
sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = 
_get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + _put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels != size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = samples.next() + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 + d += outrate + + while d >= 0: + for chan in range(nchannels): + cur_o = ( + (prev_i[chan] * d + cur_i[chan] * (outrate - d)) + / outrate + ) + _put_sample(result, size, out_i, _overflow(cur_o, size)) + out_i += 1 + d -= inrate + + +def lin2ulaw(cp, size): + raise NotImplementedError() + + +def ulaw2lin(cp, size): + raise NotImplementedError() + + +def lin2alaw(cp, size): + raise NotImplementedError() + + +def alaw2lin(cp, size): + raise NotImplementedError() + + +def lin2adpcm(cp, size, state): + raise NotImplementedError() + + +def adpcm2lin(cp, size, state): + raise 
NotImplementedError() diff --git a/sbapp/pydub/scipy_effects.py b/sbapp/pydub/scipy_effects.py new file mode 100644 index 0000000..abab2b4 --- /dev/null +++ b/sbapp/pydub/scipy_effects.py @@ -0,0 +1,175 @@ +""" +This module provides scipy versions of high_pass_filter, and low_pass_filter +as well as an additional band_pass_filter. + +Of course, you will need to install scipy for these to work. + +When this module is imported the high and low pass filters from this module +will be used when calling audio_segment.high_pass_filter() and +audio_segment.high_pass_filter() instead of the slower, less powerful versions +provided by pydub.effects. +""" +from scipy.signal import butter, sosfilt +from .utils import (register_pydub_effect,stereo_to_ms,ms_to_stereo) + + +def _mk_butter_filter(freq, type, order): + """ + Args: + freq: The cutoff frequency for highpass and lowpass filters. For + band filters, a list of [low_cutoff, high_cutoff] + type: "lowpass", "highpass", or "band" + order: nth order butterworth filter (default: 5th order). The + attenuation is -6dB/octave beyond the cutoff frequency (for 1st + order). A Higher order filter will have more attenuation, each level + adding an additional -6dB (so a 3rd order butterworth filter would + be -18dB/octave). + + Returns: + function which can filter a mono audio segment + + """ + def filter_fn(seg): + assert seg.channels == 1 + + nyq = 0.5 * seg.frame_rate + try: + freqs = [f / nyq for f in freq] + except TypeError: + freqs = freq / nyq + + sos = butter(order, freqs, btype=type, output='sos') + y = sosfilt(sos, seg.get_array_of_samples()) + + return seg._spawn(y.astype(seg.array_type)) + + return filter_fn + + +@register_pydub_effect +def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5): + filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order) + return seg.apply_mono_filter_to_each_channel(filter_fn) + + +@register_pydub_effect +def high_pass_filter(seg, cutoff_freq, order=5): + filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order) + return seg.apply_mono_filter_to_each_channel(filter_fn) + + +@register_pydub_effect +def low_pass_filter(seg, cutoff_freq, order=5): + filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order) + return seg.apply_mono_filter_to_each_channel(filter_fn) + + +@register_pydub_effect +def _eq(seg, focus_freq, bandwidth=100, mode="peak", gain_dB=0, order=2): + """ + Args: + focus_freq - middle frequency or known frequency of band (in Hz) + bandwidth - range of the equalizer band + mode - Mode of Equalization(Peak/Notch(Bell Curve),High Shelf, Low Shelf) + order - Rolloff factor(1 - 6dB/Octave 2 - 12dB/Octave) + + Returns: + Equalized/Filtered AudioSegment + """ + filt_mode = ["peak", "low_shelf", "high_shelf"] + if mode not in filt_mode: + raise ValueError("Incorrect Mode Selection") + + if gain_dB >= 0: + if mode == "peak": + sec = band_pass_filter(seg, focus_freq - bandwidth/2, focus_freq + bandwidth/2, order = order) + seg = seg.overlay(sec - (3 - gain_dB)) + return seg + + if mode == "low_shelf": + sec = low_pass_filter(seg, focus_freq, order=order) + seg = seg.overlay(sec - (3 - gain_dB)) + return seg + + if mode == "high_shelf": + sec = high_pass_filter(seg, focus_freq, order=order) + seg = seg.overlay(sec - (3 - gain_dB)) + return seg + + if gain_dB < 0: + if mode == "peak": + sec = high_pass_filter(seg, focus_freq - bandwidth/2, order=order) + seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB + sec = low_pass_filter(seg, focus_freq + 
bandwidth/2, order=order) + seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB + return seg + + if mode == "low_shelf": + sec = high_pass_filter(seg, focus_freq, order=order) + seg = seg.overlay(sec - (3 + gain_dB)) + gain_dB + return seg + + if mode=="high_shelf": + sec=low_pass_filter(seg, focus_freq, order=order) + seg=seg.overlay(sec - (3 + gain_dB)) +gain_dB + return seg + + +@register_pydub_effect +def eq(seg, focus_freq, bandwidth=100, channel_mode="L+R", filter_mode="peak", gain_dB=0, order=2): + """ + Args: + focus_freq - middle frequency or known frequency of band (in Hz) + bandwidth - range of the equalizer band + channel_mode - Select Channels to be affected by the filter. + L+R - Standard Stereo Filter + L - Only Left Channel is Filtered + R - Only Right Channel is Filtered + M+S - Blumlien Stereo Filter(Mid-Side) + M - Only Mid Channel is Filtered + S - Only Side Channel is Filtered + Mono Audio Segments are completely filtered. + filter_mode - Mode of Equalization(Peak/Notch(Bell Curve),High Shelf, Low Shelf) + order - Rolloff factor(1 - 6dB/Octave 2 - 12dB/Octave) + + Returns: + Equalized/Filtered AudioSegment + """ + channel_modes = ["L+R", "M+S", "L", "R", "M", "S"] + if channel_mode not in channel_modes: + raise ValueError("Incorrect Channel Mode Selection") + + if seg.channels == 1: + return _eq(seg, focus_freq, bandwidth, filter_mode, gain_dB, order) + + if channel_mode == "L+R": + return _eq(seg, focus_freq, bandwidth, filter_mode, gain_dB, order) + + if channel_mode == "L": + seg = seg.split_to_mono() + seg = [_eq(seg[0], focus_freq, bandwidth, filter_mode, gain_dB, order), seg[1]] + return AudioSegment.from_mono_audio_segements(seg[0], seg[1]) + + if channel_mode == "R": + seg = seg.split_to_mono() + seg = [seg[0], _eq(seg[1], focus_freq, bandwidth, filter_mode, gain_dB, order)] + return AudioSegment.from_mono_audio_segements(seg[0], seg[1]) + + if channel_mode == "M+S": + seg = stereo_to_ms(seg) + seg = _eq(seg, focus_freq, bandwidth, filter_mode, gain_dB, order) + return ms_to_stereo(seg) + + if channel_mode == "M": + seg = stereo_to_ms(seg).split_to_mono() + seg = [_eq(seg[0], focus_freq, bandwidth, filter_mode, gain_dB, order), seg[1]] + seg = AudioSegment.from_mono_audio_segements(seg[0], seg[1]) + return ms_to_stereo(seg) + + if channel_mode == "S": + seg = stereo_to_ms(seg).split_to_mono() + seg = [seg[0], _eq(seg[1], focus_freq, bandwidth, filter_mode, gain_dB, order)] + seg = AudioSegment.from_mono_audio_segements(seg[0], seg[1]) + return ms_to_stereo(seg) + + diff --git a/sbapp/pydub/silence.py b/sbapp/pydub/silence.py new file mode 100644 index 0000000..0ad1499 --- /dev/null +++ b/sbapp/pydub/silence.py @@ -0,0 +1,182 @@ +""" +Various functions for finding/manipulating silence in AudioSegments +""" +import itertools + +from .utils import db_to_float + + +def detect_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1): + """ + Returns a list of all silent sections [start, end] in milliseconds of audio_segment. 
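+
+    A minimal usage sketch (assuming `seg` is an already-loaded AudioSegment;
+    the variable names are illustrative only):
+
+        silent_spans = detect_silence(seg, min_silence_len=500, silence_thresh=-40)
+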
+ Inverse of detect_nonsilent() + + audio_segment - the segment to find silence in + min_silence_len - the minimum length for any silent section + silence_thresh - the upper bound for how quiet is silent in dFBS + seek_step - step size for interating over the segment in ms + """ + seg_len = len(audio_segment) + + # you can't have a silent portion of a sound that is longer than the sound + if seg_len < min_silence_len: + return [] + + # convert silence threshold to a float value (so we can compare it to rms) + silence_thresh = db_to_float(silence_thresh) * audio_segment.max_possible_amplitude + + # find silence and add start and end indicies to the to_cut list + silence_starts = [] + + # check successive (1 sec by default) chunk of sound for silence + # try a chunk at every "seek step" (or every chunk for a seek step == 1) + last_slice_start = seg_len - min_silence_len + slice_starts = range(0, last_slice_start + 1, seek_step) + + # guarantee last_slice_start is included in the range + # to make sure the last portion of the audio is searched + if last_slice_start % seek_step: + slice_starts = itertools.chain(slice_starts, [last_slice_start]) + + for i in slice_starts: + audio_slice = audio_segment[i:i + min_silence_len] + if audio_slice.rms <= silence_thresh: + silence_starts.append(i) + + # short circuit when there is no silence + if not silence_starts: + return [] + + # combine the silence we detected into ranges (start ms - end ms) + silent_ranges = [] + + prev_i = silence_starts.pop(0) + current_range_start = prev_i + + for silence_start_i in silence_starts: + continuous = (silence_start_i == prev_i + seek_step) + + # sometimes two small blips are enough for one particular slice to be + # non-silent, despite the silence all running together. Just combine + # the two overlapping silent ranges. + silence_has_gap = silence_start_i > (prev_i + min_silence_len) + + if not continuous and silence_has_gap: + silent_ranges.append([current_range_start, + prev_i + min_silence_len]) + current_range_start = silence_start_i + prev_i = silence_start_i + + silent_ranges.append([current_range_start, + prev_i + min_silence_len]) + + return silent_ranges + + +def detect_nonsilent(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1): + """ + Returns a list of all nonsilent sections [start, end] in milliseconds of audio_segment. 
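+
+    A minimal usage sketch (assuming `seg` is an already-loaded AudioSegment;
+    the variable names are illustrative only):
+
+        loud_spans = detect_nonsilent(seg, min_silence_len=500, silence_thresh=-40)
+        loud_parts = [seg[start:end] for start, end in loud_spans]
+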
+ Inverse of detect_silent() + + audio_segment - the segment to find silence in + min_silence_len - the minimum length for any silent section + silence_thresh - the upper bound for how quiet is silent in dFBS + seek_step - step size for interating over the segment in ms + """ + silent_ranges = detect_silence(audio_segment, min_silence_len, silence_thresh, seek_step) + len_seg = len(audio_segment) + + # if there is no silence, the whole thing is nonsilent + if not silent_ranges: + return [[0, len_seg]] + + # short circuit when the whole audio segment is silent + if silent_ranges[0][0] == 0 and silent_ranges[0][1] == len_seg: + return [] + + prev_end_i = 0 + nonsilent_ranges = [] + for start_i, end_i in silent_ranges: + nonsilent_ranges.append([prev_end_i, start_i]) + prev_end_i = end_i + + if end_i != len_seg: + nonsilent_ranges.append([prev_end_i, len_seg]) + + if nonsilent_ranges[0] == [0, 0]: + nonsilent_ranges.pop(0) + + return nonsilent_ranges + + +def split_on_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, keep_silence=100, + seek_step=1): + """ + Returns list of audio segments from splitting audio_segment on silent sections + + audio_segment - original pydub.AudioSegment() object + + min_silence_len - (in ms) minimum length of a silence to be used for + a split. default: 1000ms + + silence_thresh - (in dBFS) anything quieter than this will be + considered silence. default: -16dBFS + + keep_silence - (in ms or True/False) leave some silence at the beginning + and end of the chunks. Keeps the sound from sounding like it + is abruptly cut off. + When the length of the silence is less than the keep_silence duration + it is split evenly between the preceding and following non-silent + segments. + If True is specified, all the silence is kept, if False none is kept. + default: 100ms + + seek_step - step size for interating over the segment in ms + """ + + # from the itertools documentation + def pairwise(iterable): + "s -> (s0,s1), (s1,s2), (s2, s3), ..." + a, b = itertools.tee(iterable) + next(b, None) + return zip(a, b) + + if isinstance(keep_silence, bool): + keep_silence = len(audio_segment) if keep_silence else 0 + + output_ranges = [ + [ start - keep_silence, end + keep_silence ] + for (start,end) + in detect_nonsilent(audio_segment, min_silence_len, silence_thresh, seek_step) + ] + + for range_i, range_ii in pairwise(output_ranges): + last_end = range_i[1] + next_start = range_ii[0] + if next_start < last_end: + range_i[1] = (last_end+next_start)//2 + range_ii[0] = range_i[1] + + return [ + audio_segment[ max(start,0) : min(end,len(audio_segment)) ] + for start,end in output_ranges + ] + + +def detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10): + """ + Returns the millisecond/index that the leading silence ends. 
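+
+    A minimal usage sketch (assuming `sound` is an already-loaded AudioSegment):
+
+        trimmed = sound[detect_leading_silence(sound):]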
+ + audio_segment - the segment to find silence in + silence_threshold - the upper bound for how quiet is silent in dFBS + chunk_size - chunk size for interating over the segment in ms + """ + trim_ms = 0 # ms + assert chunk_size > 0 # to avoid infinite loop + while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold and trim_ms < len(sound): + trim_ms += chunk_size + + # if there is no end it should return the length of the segment + return min(trim_ms, len(sound)) + + diff --git a/sbapp/pydub/utils.py b/sbapp/pydub/utils.py new file mode 100644 index 0000000..740c500 --- /dev/null +++ b/sbapp/pydub/utils.py @@ -0,0 +1,434 @@ +from __future__ import division + +import json +import os +import re +import sys +from subprocess import Popen, PIPE +from math import log, ceil +from tempfile import TemporaryFile +from warnings import warn +from functools import wraps + +try: + import audioop +except ImportError: + import pyaudioop as audioop + +if sys.version_info >= (3, 0): + basestring = str + +FRAME_WIDTHS = { + 8: 1, + 16: 2, + 32: 4, +} +ARRAY_TYPES = { + 8: "b", + 16: "h", + 32: "i", +} +ARRAY_RANGES = { + 8: (-0x80, 0x7f), + 16: (-0x8000, 0x7fff), + 32: (-0x80000000, 0x7fffffff), +} + + +def get_frame_width(bit_depth): + return FRAME_WIDTHS[bit_depth] + + +def get_array_type(bit_depth, signed=True): + t = ARRAY_TYPES[bit_depth] + if not signed: + t = t.upper() + return t + + +def get_min_max_value(bit_depth): + return ARRAY_RANGES[bit_depth] + + +def _fd_or_path_or_tempfile(fd, mode='w+b', tempfile=True): + close_fd = False + if fd is None and tempfile: + fd = TemporaryFile(mode=mode) + close_fd = True + + if isinstance(fd, basestring): + fd = open(fd, mode=mode) + close_fd = True + + try: + if isinstance(fd, os.PathLike): + fd = open(fd, mode=mode) + close_fd = True + except AttributeError: + # module os has no attribute PathLike, so we're on python < 3.6. + # The protocol we're trying to support doesn't exist, so just pass. + pass + + return fd, close_fd + + +def db_to_float(db, using_amplitude=True): + """ + Converts the input db to a float, which represents the equivalent + ratio in power. + """ + db = float(db) + if using_amplitude: + return 10 ** (db / 20) + else: # using power + return 10 ** (db / 10) + + +def ratio_to_db(ratio, val2=None, using_amplitude=True): + """ + Converts the input float to db, which represents the equivalent + to the ratio in power represented by the multiplier passed in. + """ + ratio = float(ratio) + + # accept 2 values and use the ratio of val1 to val2 + if val2 is not None: + ratio = ratio / val2 + + # special case for multiply-by-zero (convert to silence) + if ratio == 0: + return -float('inf') + + if using_amplitude: + return 20 * log(ratio, 10) + else: # using power + return 10 * log(ratio, 10) + + +def register_pydub_effect(fn, name=None): + """ + decorator for adding pydub effects to the AudioSegment objects. + example use: + @register_pydub_effect + def normalize(audio_segment): + ... + or you can specify a name: + @register_pydub_effect("normalize") + def normalize_audio_segment(audio_segment): + ... + """ + if isinstance(fn, basestring): + name = fn + return lambda fn: register_pydub_effect(fn, name) + + if name is None: + name = fn.__name__ + + from .audio_segment import AudioSegment + setattr(AudioSegment, name, fn) + return fn + + +def make_chunks(audio_segment, chunk_length): + """ + Breaks an AudioSegment into chunks that are milliseconds + long. 
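+
+    A minimal usage sketch (assuming `seg` is an already-loaded AudioSegment;
+    the variable name is illustrative only):
+
+        fifty_ms_chunks = make_chunks(seg, 50)
+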
+ if chunk_length is 50 then you'll get a list of 50 millisecond long audio + segments back (except the last one, which can be shorter) + """ + number_of_chunks = ceil(len(audio_segment) / float(chunk_length)) + return [audio_segment[i * chunk_length:(i + 1) * chunk_length] + for i in range(int(number_of_chunks))] + + +def which(program): + """ + Mimics behavior of UNIX which command. + """ + # Add .exe program extension for windows support + if os.name == "nt" and not program.endswith(".exe"): + program += ".exe" + + envdir_list = [os.curdir] + os.environ["PATH"].split(os.pathsep) + + for envdir in envdir_list: + program_path = os.path.join(envdir, program) + if os.path.isfile(program_path) and os.access(program_path, os.X_OK): + return program_path + + +def get_encoder_name(): + """ + Return enconder default application for system, either avconv or ffmpeg + """ + if which("avconv"): + return "avconv" + elif which("ffmpeg"): + return "ffmpeg" + else: + # should raise exception + warn("Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work", RuntimeWarning) + return "ffmpeg" + + +def get_player_name(): + """ + Return enconder default application for system, either avconv or ffmpeg + """ + if which("avplay"): + return "avplay" + elif which("ffplay"): + return "ffplay" + else: + # should raise exception + warn("Couldn't find ffplay or avplay - defaulting to ffplay, but may not work", RuntimeWarning) + return "ffplay" + + +def get_prober_name(): + """ + Return probe application, either avconv or ffmpeg + """ + if which("avprobe"): + return "avprobe" + elif which("ffprobe"): + return "ffprobe" + else: + # should raise exception + warn("Couldn't find ffprobe or avprobe - defaulting to ffprobe, but may not work", RuntimeWarning) + return "ffprobe" + + +def fsdecode(filename): + """Wrapper for os.fsdecode which was introduced in python 3.2 .""" + + if sys.version_info >= (3, 2): + PathLikeTypes = (basestring, bytes) + if sys.version_info >= (3, 6): + PathLikeTypes += (os.PathLike,) + if isinstance(filename, PathLikeTypes): + return os.fsdecode(filename) + else: + if isinstance(filename, bytes): + return filename.decode(sys.getfilesystemencoding()) + if isinstance(filename, basestring): + return filename + + raise TypeError("type {0} not accepted by fsdecode".format(type(filename))) + + +def get_extra_info(stderr): + """ + avprobe sometimes gives more information on stderr than + on the json output. The information has to be extracted + from stderr of the format of: + ' Stream #0:0: Audio: flac, 88200 Hz, stereo, s32 (24 bit)' + or (macOS version): + ' Stream #0:0: Audio: vorbis' + ' 44100 Hz, stereo, fltp, 320 kb/s' + + :type stderr: str + :rtype: list of dict + """ + extra_info = {} + + re_stream = r'(?P +)Stream #0[:\.](?P([0-9]+))(?P.+)\n?(?! *Stream)((?P +)(?P.+))?' + for i in re.finditer(re_stream, stderr): + if i.group('space_end') is not None and len(i.group('space_start')) <= len( + i.group('space_end')): + content_line = ','.join([i.group('content_0'), i.group('content_1')]) + else: + content_line = i.group('content_0') + tokens = [x.strip() for x in re.split('[:,]', content_line) if x] + extra_info[int(i.group('stream_id'))] = tokens + return extra_info + + +def mediainfo_json(filepath, read_ahead_limit=-1): + """Return json dictionary with media info(codec, duration, size, bitrate...) 
from filepath + """ + prober = get_prober_name() + command_args = [ + "-v", "info", + "-show_format", + "-show_streams", + ] + try: + command_args += [fsdecode(filepath)] + stdin_parameter = None + stdin_data = None + except TypeError: + if prober == 'ffprobe': + command_args += ["-read_ahead_limit", str(read_ahead_limit), + "cache:pipe:0"] + else: + command_args += ["-"] + stdin_parameter = PIPE + file, close_file = _fd_or_path_or_tempfile(filepath, 'rb', tempfile=False) + file.seek(0) + stdin_data = file.read() + if close_file: + file.close() + + command = [prober, '-of', 'json'] + command_args + res = Popen(command, stdin=stdin_parameter, stdout=PIPE, stderr=PIPE) + output, stderr = res.communicate(input=stdin_data) + output = output.decode("utf-8", 'ignore') + stderr = stderr.decode("utf-8", 'ignore') + + info = json.loads(output) + + if not info: + # If ffprobe didn't give any information, just return it + # (for example, because the file doesn't exist) + return info + + extra_info = get_extra_info(stderr) + + audio_streams = [x for x in info['streams'] if x['codec_type'] == 'audio'] + if len(audio_streams) == 0: + return info + + # We just operate on the first audio stream in case there are more + stream = audio_streams[0] + + def set_property(stream, prop, value): + if prop not in stream or stream[prop] == 0: + stream[prop] = value + + for token in extra_info[stream['index']]: + m = re.match('([su]([0-9]{1,2})p?) \(([0-9]{1,2}) bit\)$', token) + m2 = re.match('([su]([0-9]{1,2})p?)( \(default\))?$', token) + if m: + set_property(stream, 'sample_fmt', m.group(1)) + set_property(stream, 'bits_per_sample', int(m.group(2))) + set_property(stream, 'bits_per_raw_sample', int(m.group(3))) + elif m2: + set_property(stream, 'sample_fmt', m2.group(1)) + set_property(stream, 'bits_per_sample', int(m2.group(2))) + set_property(stream, 'bits_per_raw_sample', int(m2.group(2))) + elif re.match('(flt)p?( \(default\))?$', token): + set_property(stream, 'sample_fmt', token) + set_property(stream, 'bits_per_sample', 32) + set_property(stream, 'bits_per_raw_sample', 32) + elif re.match('(dbl)p?( \(default\))?$', token): + set_property(stream, 'sample_fmt', token) + set_property(stream, 'bits_per_sample', 64) + set_property(stream, 'bits_per_raw_sample', 64) + return info + + +def mediainfo(filepath): + """Return dictionary with media info(codec, duration, size, bitrate...) 
from filepath + """ + + prober = get_prober_name() + command_args = [ + "-v", "quiet", + "-show_format", + "-show_streams", + filepath + ] + + command = [prober, '-of', 'old'] + command_args + res = Popen(command, stdout=PIPE) + output = res.communicate()[0].decode("utf-8") + + if res.returncode != 0: + command = [prober] + command_args + output = Popen(command, stdout=PIPE).communicate()[0].decode("utf-8") + + rgx = re.compile(r"(?:(?P.*?):)?(?P.*?)\=(?P.*?)$") + info = {} + + if sys.platform == 'win32': + output = output.replace("\r", "") + + for line in output.split("\n"): + # print(line) + mobj = rgx.match(line) + + if mobj: + # print(mobj.groups()) + inner_dict, key, value = mobj.groups() + + if inner_dict: + try: + info[inner_dict] + except KeyError: + info[inner_dict] = {} + info[inner_dict][key] = value + else: + info[key] = value + + return info + + +def cache_codecs(function): + cache = {} + + @wraps(function) + def wrapper(): + try: + return cache[0] + except: + cache[0] = function() + return cache[0] + + return wrapper + + +@cache_codecs +def get_supported_codecs(): + encoder = get_encoder_name() + command = [encoder, "-codecs"] + res = Popen(command, stdout=PIPE, stderr=PIPE) + output = res.communicate()[0].decode("utf-8") + if res.returncode != 0: + return [] + + if sys.platform == 'win32': + output = output.replace("\r", "") + + + rgx = re.compile(r"^([D.][E.][AVS.][I.][L.][S.]) (\w*) +(.*)") + decoders = set() + encoders = set() + for line in output.split('\n'): + match = rgx.match(line.strip()) + if not match: + continue + flags, codec, name = match.groups() + + if flags[0] == 'D': + decoders.add(codec) + + if flags[1] == 'E': + encoders.add(codec) + + return (decoders, encoders) + + +def get_supported_decoders(): + return get_supported_codecs()[0] + + +def get_supported_encoders(): + return get_supported_codecs()[1] + +def stereo_to_ms(audio_segment): + ''' + Left-Right -> Mid-Side + ''' + channel = audio_segment.split_to_mono() + channel = [channel[0].overlay(channel[1]), channel[0].overlay(channel[1].invert_phase())] + return AudioSegment.from_mono_audiosegments(channel[0], channel[1]) + +def ms_to_stereo(audio_segment): + ''' + Mid-Side -> Left-Right + ''' + channel = audio_segment.split_to_mono() + channel = [channel[0].overlay(channel[1]) - 3, channel[0].overlay(channel[1].invert_phase()) - 3] + return AudioSegment.from_mono_audiosegments(channel[0], channel[1]) + diff --git a/sbapp/pyogg/__init__.py b/sbapp/pyogg/__init__.py new file mode 100644 index 0000000..a97b0d2 --- /dev/null +++ b/sbapp/pyogg/__init__.py @@ -0,0 +1,108 @@ +import ctypes + +from .pyogg_error import PyOggError +from .ogg import PYOGG_OGG_AVAIL +from .vorbis import PYOGG_VORBIS_AVAIL, PYOGG_VORBIS_FILE_AVAIL, PYOGG_VORBIS_ENC_AVAIL +from .opus import PYOGG_OPUS_AVAIL, PYOGG_OPUS_FILE_AVAIL, PYOGG_OPUS_ENC_AVAIL +from .flac import PYOGG_FLAC_AVAIL + + +#: PyOgg version number. Versions should comply with PEP440. 
+__version__ = '0.7' + + +if (PYOGG_OGG_AVAIL and PYOGG_VORBIS_AVAIL and PYOGG_VORBIS_FILE_AVAIL): + # VorbisFile + from .vorbis_file import VorbisFile + # VorbisFileStream + from .vorbis_file_stream import VorbisFileStream + +else: + class VorbisFile: # type: ignore + def __init__(*args, **kw): + if not PYOGG_OGG_AVAIL: + raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + raise PyOggError("The Vorbis libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + + class VorbisFileStream: # type: ignore + def __init__(*args, **kw): + if not PYOGG_OGG_AVAIL: + raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + raise PyOggError("The Vorbis libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + + + +if (PYOGG_OGG_AVAIL and PYOGG_OPUS_AVAIL and PYOGG_OPUS_FILE_AVAIL): + # OpusFile + from .opus_file import OpusFile + # OpusFileStream + from .opus_file_stream import OpusFileStream + +else: + class OpusFile: # type: ignore + def __init__(*args, **kw): + if not PYOGG_OGG_AVAIL: + raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + if not PYOGG_OPUS_AVAIL: + raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + if not PYOGG_OPUS_FILE_AVAIL: + raise PyOggError("The OpusFile library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + raise PyOggError("Unknown initialisation error") + + class OpusFileStream: # type: ignore + def __init__(*args, **kw): + if not PYOGG_OGG_AVAIL: + raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + if not PYOGG_OPUS_AVAIL: + raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + if not PYOGG_OPUS_FILE_AVAIL: + raise PyOggError("The OpusFile library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + raise PyOggError("Unknown initialisation error") + + +if PYOGG_OPUS_AVAIL: + # OpusEncoder + from .opus_encoder import OpusEncoder + # OpusBufferedEncoder + from .opus_buffered_encoder import OpusBufferedEncoder + # OpusDecoder + from .opus_decoder import OpusDecoder + +else: + class OpusEncoder: # type: ignore + def __init__(*args, **kw): + raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + + class OpusBufferedEncoder: # type: ignore + def __init__(*args, **kw): + raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + + class OpusDecoder: # type: ignore + def __init__(*args, **kw): + raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + +if (PYOGG_OGG_AVAIL and PYOGG_OPUS_AVAIL): + # OggOpusWriter + from .ogg_opus_writer import OggOpusWriter + +else: + class OggOpusWriter: # type: ignore + def __init__(*args, **kw): + if not PYOGG_OGG_AVAIL: + raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe 
you're trying to use 64bit libraries with 32bit Python?)") + raise PyOggError("The Opus library was't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + + +if PYOGG_FLAC_AVAIL: + # FlacFile + from .flac_file import FlacFile + # FlacFileStream + from .flac_file_stream import FlacFileStream +else: + class FlacFile: # type: ignore + def __init__(*args, **kw): + raise PyOggError("The FLAC libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") + + class FlacFileStream: # type: ignore + def __init__(*args, **kw): + raise PyOggError("The FLAC libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)") diff --git a/sbapp/pyogg/audio_file.py b/sbapp/pyogg/audio_file.py new file mode 100644 index 0000000..4fb77a2 --- /dev/null +++ b/sbapp/pyogg/audio_file.py @@ -0,0 +1,59 @@ +from .pyogg_error import PyOggError + +class AudioFile: + """Abstract base class for audio files. + + This class is a base class for audio files (such as Vorbis, Opus, + and FLAC). It should not be instatiated directly. + """ + + def __init__(self): + raise PyOggError("AudioFile is an Abstract Base Class "+ + "and should not be instantiated") + + def as_array(self): + """Returns the buffer as a NumPy array. + + The shape of the returned array is in units of (number of + samples per channel, number of channels). + + The data type is either 8-bit or 16-bit signed integers, + depending on bytes_per_sample. + + The buffer is not copied, but rather the NumPy array + shares the memory with the buffer. + + """ + # Assumes that self.buffer is a one-dimensional array of + # bytes and that channels are interleaved. + + import numpy # type: ignore + + assert self.buffer is not None + assert self.channels is not None + + # The following code assumes that the bytes in the buffer + # represent 8-bit or 16-bit signed ints. Ensure the number of + # bytes per sample matches that assumption. + assert self.bytes_per_sample == 1 or self.bytes_per_sample == 2 + + # Create a dictionary mapping bytes per sample to numpy data + # types + dtype = { + 1: numpy.int8, + 2: numpy.int16 + } + + # Convert the ctypes buffer to a NumPy array + array = numpy.frombuffer( + self.buffer, + dtype=dtype[self.bytes_per_sample] + ) + + # Reshape the array + return array.reshape( + (len(self.buffer) + // self.bytes_per_sample + // self.channels, + self.channels) + ) diff --git a/sbapp/pyogg/flac.py b/sbapp/pyogg/flac.py new file mode 100644 index 0000000..d44509e --- /dev/null +++ b/sbapp/pyogg/flac.py @@ -0,0 +1,2061 @@ +############################################################ +# Flac license: # +############################################################ +""" +Copyright (C) 2000-2009 Josh Coalson +Copyright (C) 2011-2016 Xiph.Org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. 
+ +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import ctypes +from ctypes import c_int, c_int8, c_int16, c_int32, c_int64, c_uint, c_uint8, c_uint16, c_uint32, c_uint64, c_float, c_long, c_ulong, c_char, c_bool, c_char_p, c_ubyte, c_longlong, c_ulonglong, c_size_t, c_void_p, c_double, POINTER, pointer, cast, CFUNCTYPE, Structure, Union +import ctypes.util +import sys +from traceback import print_exc as _print_exc +import os + +from .ogg import * + +from .library_loader import ExternalLibrary, ExternalLibraryError + +__here = os.getcwd() + +libflac = None + +try: + names = { + "Windows": "libFLAC.dll", + "Darwin": "libFLAC.8.dylib", + "external": "FLAC" + } + libflac = Library.load(names, tests = [lambda lib: hasattr(lib, "FLAC__EntropyCodingMethodTypeString")]) +except ExternalLibraryError: + pass +except: + _print_exc() + +if libflac: + PYOGG_FLAC_AVAIL = True +else: + PYOGG_FLAC_AVAIL = False + +# ctypes +c_ubyte_p = POINTER(c_ubyte) +c_uchar_p = c_ubyte_p +c_uint_p = POINTER(c_uint) +c_size_t_p = POINTER(c_size_t) +c_off_t = c_int32 +# /ctypes + +if PYOGG_FLAC_AVAIL: + # Sanity check also satisfies mypy type checking + assert libflac is not None + + # ordinals + + FLAC__int8 = c_int8 + FLAC__uint8 = c_uint8 + + FLAC__int16 = c_int16 + + FLAC__int32 = c_int32 + FLAC__int32_p = POINTER(FLAC__int32) + + FLAC__int64 = c_int64 + FLAC__uint16 = c_uint16 + FLAC__uint32 = c_uint32 + FLAC__uint64 = c_uint64 + + FLAC__uint64_p = POINTER(FLAC__uint64) + + FLAC__bool = c_bool + + FLAC__byte = c_uint8 + + FLAC__byte_p = POINTER(FLAC__byte) + + c_char_p_p = POINTER(c_char_p) + + # /ordinals + + # callback + + FLAC__IOHandle = CFUNCTYPE(c_void_p) + + FLAC__IOCallback_Read = CFUNCTYPE(c_size_t, + c_void_p, + c_size_t, + c_size_t, + FLAC__IOHandle) + + FLAC__IOCallback_Write = CFUNCTYPE(c_size_t, c_void_p, c_size_t, c_size_t, FLAC__IOHandle) + + FLAC__IOCallback_Seek = CFUNCTYPE(c_int, FLAC__IOHandle, FLAC__int64, c_int) + + FLAC__IOCallback_Tell = CFUNCTYPE(FLAC__int64, FLAC__IOHandle) + + FLAC__IOCallback_Eof = CFUNCTYPE(c_int, FLAC__IOHandle) + + FLAC__IOCallback_Close = CFUNCTYPE(c_int, FLAC__IOHandle) + + class FLAC__IOCallbacks(Structure): + _fields_ = [("read", FLAC__IOCallback_Read), + ("write", FLAC__IOCallback_Write), + ("seek", FLAC__IOCallback_Seek), + ("tell", FLAC__IOCallback_Tell), + ("eof", FLAC__IOCallback_Eof), + ("close", FLAC__IOCallback_Close)] + + # /callback + + # format + + FLAC__MAX_METADATA_TYPE_CODE =(126) + FLAC__MIN_BLOCK_SIZE =(16) + FLAC__MAX_BLOCK_SIZE =(65535) + FLAC__SUBSET_MAX_BLOCK_SIZE_48000HZ =(4608) + FLAC__MAX_CHANNELS =(8) + FLAC__MIN_BITS_PER_SAMPLE =(4) + 
FLAC__MAX_BITS_PER_SAMPLE =(32) + FLAC__REFERENCE_CODEC_MAX_BITS_PER_SAMPLE =(24) + FLAC__MAX_SAMPLE_RATE =(655350) + FLAC__MAX_LPC_ORDER =(32) + FLAC__SUBSET_MAX_LPC_ORDER_48000HZ =(12) + FLAC__MIN_QLP_COEFF_PRECISION =(5) + FLAC__MAX_QLP_COEFF_PRECISION =(15) + FLAC__MAX_FIXED_ORDER =(4) + FLAC__MAX_RICE_PARTITION_ORDER =(15) + FLAC__SUBSET_MAX_RICE_PARTITION_ORDER =(8) + + FLAC__VERSION_STRING = c_char_p.in_dll(libflac, "FLAC__VERSION_STRING") + + FLAC__VENDOR_STRING = c_char_p.in_dll(libflac, "FLAC__VENDOR_STRING") + + FLAC__STREAM_SYNC_STRING = (FLAC__byte * 4).in_dll(libflac, "FLAC__STREAM_SYNC_STRING") + + FLAC__STREAM_SYNC = c_uint.in_dll(libflac, "FLAC__STREAM_SYNC") + + FLAC__STREAM_SYNC_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_SYNC_LEN") + + FLAC__STREAM_SYNC_LENGTH =(4) + + + + FLAC__EntropyCodingMethodType = c_int + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE = 0 + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2 = 1 + + + + libflac.FLAC__EntropyCodingMethodTypeString.restype = c_char_p + libflac.FLAC__EntropyCodingMethodTypeString.argtypes = [] + + def FLAC__EntropyCodingMethodTypeString(): + return libflac.FLAC__EntropyCodingMethodTypeString() + + + + class FLAC__EntropyCodingMethod_PartitionedRiceContents(Structure): + _fields_ = [("parameters", c_uint_p), + ("raw_bits", c_uint_p), + ("capacity_by_order", c_uint)] + + class FLAC__EntropyCodingMethod_PartitionedRice(Structure): + _fields_ = [("order", c_uint), + ("contents", POINTER(FLAC__EntropyCodingMethod_PartitionedRiceContents))] + + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_ORDER_LEN = c_uint.in_dll(libflac, "FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_ORDER_LEN") + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_PARAMETER_LEN = c_uint.in_dll(libflac, "FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_PARAMETER_LEN") + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2_PARAMETER_LEN = c_uint.in_dll(libflac, "FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2_PARAMETER_LEN") + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_RAW_LEN = c_uint.in_dll(libflac, "FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_RAW_LEN") + + FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_ESCAPE_PARAMETER = c_uint.in_dll(libflac, "FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_ESCAPE_PARAMETER") + + + class FLAC__EntropyCodingMethod_data(Union): + _fields_ = [("partitioned_rice", FLAC__EntropyCodingMethod_PartitionedRice)] + + class FLAC__EntropyCodingMethod(Structure): + _fields_ = [("type", POINTER(FLAC__EntropyCodingMethodType)), + ("data", FLAC__EntropyCodingMethod_data)] + + FLAC__ENTROPY_CODING_METHOD_TYPE_LEN = c_uint.in_dll(libflac, "FLAC__ENTROPY_CODING_METHOD_TYPE_LEN") + + + + FLAC__SubframeType = c_int + FLAC__SUBFRAME_TYPE_CONSTANT = 0 + FLAC__SUBFRAME_TYPE_VERBATIM = 1 + FLAC__SUBFRAME_TYPE_FIXED = 2 + FLAC__SUBFRAME_TYPE_LPC = 3 + + + + libflac.FLAC__SubframeTypeString.restype = c_char_p + libflac.FLAC__SubframeTypeString.argtypes = [] + + def FLAC__SubframeTypeString(): + return libflac.FLAC__SubframeTypeString() + + + + class FLAC__Subframe_Constant(Structure): + _fields_ = [("value", FLAC__int32)] + + + class FLAC__Subframe_Verbatim(Structure): + _fields_ = [("data", FLAC__int32_p)] + + + class FLAC__Subframe_Fixed(Structure): + _fields_ = [("entropy_coding_method", FLAC__EntropyCodingMethod), + ("order", c_uint), + ("warmup", FLAC__int32 * FLAC__MAX_FIXED_ORDER), + ("residual", FLAC__int32_p)] + + + class FLAC__Subframe_LPC(Structure): + _fields_ = [("entropy_coding_method", FLAC__EntropyCodingMethod), + ("order", c_uint), + 
("qlp_coeff_precision", c_uint), + ("quantization_level", c_int), + ("qlp_coeff", FLAC__int32 * FLAC__MAX_LPC_ORDER), + ("warmup", FLAC__int32 * FLAC__MAX_LPC_ORDER), + ("residual", FLAC__int32_p)] + + + FLAC__SUBFRAME_LPC_QLP_COEFF_PRECISION_LEN = c_uint.in_dll(libflac, "FLAC__SUBFRAME_LPC_QLP_COEFF_PRECISION_LEN") + + FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN = c_uint.in_dll(libflac, "FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN") + + + + class FLAC__Subframe_data(Union): + _fields_ = [("constant", FLAC__Subframe_Constant), + ("fixed", FLAC__Subframe_Fixed), + ("lpc", FLAC__Subframe_LPC), + ("verbatim", FLAC__Subframe_Verbatim)] + + class FLAC__Subframe(Structure): + _fields_ = [("type", FLAC__SubframeType), + ("data", FLAC__Subframe_data), + ("wasted_bits", c_uint)] + + + FLAC__SUBFRAME_ZERO_PAD_LEN = c_uint.in_dll(libflac, "FLAC__SUBFRAME_ZERO_PAD_LEN") + + FLAC__SUBFRAME_TYPE_LEN = c_uint.in_dll(libflac, "FLAC__SUBFRAME_TYPE_LEN") + + FLAC__SUBFRAME_WASTED_BITS_FLAG_LEN = c_uint.in_dll(libflac, "FLAC__SUBFRAME_WASTED_BITS_FLAG_LEN") + + FLAC__SUBFRAME_TYPE_CONSTANT_BYTE_ALIGNED_MASK = c_uint.in_dll(libflac, "FLAC__SUBFRAME_TYPE_CONSTANT_BYTE_ALIGNED_MASK") + + FLAC__SUBFRAME_TYPE_VERBATIM_BYTE_ALIGNED_MASK = c_uint.in_dll(libflac, "FLAC__SUBFRAME_TYPE_VERBATIM_BYTE_ALIGNED_MASK") + + FLAC__SUBFRAME_TYPE_FIXED_BYTE_ALIGNED_MASK = c_uint.in_dll(libflac, "FLAC__SUBFRAME_TYPE_FIXED_BYTE_ALIGNED_MASK") + + FLAC__SUBFRAME_TYPE_LPC_BYTE_ALIGNED_MASK = c_uint.in_dll(libflac, "FLAC__SUBFRAME_TYPE_LPC_BYTE_ALIGNED_MASK") + + + FLAC__ChannelAssignment = c_int + + FLAC__CHANNEL_ASSIGNMENT_INDEPENDENT = 0 + FLAC__CHANNEL_ASSIGNMENT_LEFT_SIDE = 1 + FLAC__CHANNEL_ASSIGNMENT_RIGHT_SIDE = 2 + FLAC__CHANNEL_ASSIGNMENT_MID_SIDE = 3 + + + + libflac.FLAC__ChannelAssignmentString.restype = c_char_p + libflac.FLAC__ChannelAssignmentString.argtypes = [] + + def FLAC__ChannelAssignmentString(): + return libflac.FLAC__ChannelAssignmentString() + + FLAC__FrameNumberType = c_int + + + libflac.FLAC__FrameNumberTypeString.restype = c_char_p + libflac.FLAC__FrameNumberTypeString.argtypes = [] + + def FLAC__FrameNumberTypeString(): + return libflac.FLAC__FrameNumberTypeString() + + + class FLAC__FrameHeader_number(Union): + _fields_ =[("frame_number", FLAC__uint32), + ("sample_number", FLAC__uint64)] + + class FLAC__FrameHeader(Structure): + _fields_ = [("blocksize", c_uint), + ("sample_rate", c_uint), + ("channels", c_uint), + ("channel_assignment", FLAC__ChannelAssignment), + ("bits_per_sample", c_uint), + ("number_type", FLAC__FrameNumberType), + ("number", FLAC__FrameHeader_number), + ("crc", FLAC__uint8)] + + + FLAC__FRAME_HEADER_SYNC = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_SYNC") + + FLAC__FRAME_HEADER_RESERVED_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_RESERVED_LEN") + + FLAC__FRAME_HEADER_BLOCKING_STRATEGY_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_BLOCKING_STRATEGY_LEN") + + FLAC__FRAME_HEADER_BLOCK_SIZE_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_BLOCK_SIZE_LEN") + + FLAC__FRAME_HEADER_SAMPLE_RATE_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_SAMPLE_RATE_LEN") + + FLAC__FRAME_HEADER_CHANNEL_ASSIGNMENT_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_CHANNEL_ASSIGNMENT_LEN") + + FLAC__FRAME_HEADER_BITS_PER_SAMPLE_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_BITS_PER_SAMPLE_LEN") + + FLAC__FRAME_HEADER_ZERO_PAD_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_ZERO_PAD_LEN") + + FLAC__FRAME_HEADER_CRC_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_HEADER_CRC_LEN") + + + + class 
FLAC__FrameFooter(Structure): + _fields_ = [("crc", FLAC__uint16)] + + FLAC__FRAME_FOOTER_CRC_LEN = c_uint.in_dll(libflac, "FLAC__FRAME_FOOTER_CRC_LEN") + + + + class FLAC__Frame(Structure): + _fields_ = [("header", FLAC__FrameHeader), + ("subframes", FLAC__Subframe * FLAC__MAX_CHANNELS), + ("footer", FLAC__FrameFooter)] + + + FLAC__MetadataType = c_int + + FLAC__METADATA_TYPE_STREAMINFO = 0 + + FLAC__METADATA_TYPE_PADDING = 1 + + FLAC__METADATA_TYPE_APPLICATION = 2 + + FLAC__METADATA_TYPE_SEEKTABLE = 3 + + FLAC__METADATA_TYPE_VORBIS_COMMENT = 4 + + FLAC__METADATA_TYPE_CUESHEET = 5 + + FLAC__METADATA_TYPE_PICTURE = 6 + + FLAC__METADATA_TYPE_UNDEFINED = 7 + + FLAC__MAX_METADATA_TYPE = FLAC__MAX_METADATA_TYPE_CODE + + + + libflac.FLAC__MetadataTypeString.restype = c_char_p + libflac.FLAC__MetadataTypeString.argtypes = [] + + def FLAC__MetadataTypeString(): + return libflac.FLAC__MetadataTypeString() + + + + class FLAC__StreamMetadata_StreamInfo(Structure): + _fields_ = [("min_blocksize", c_uint), + ("max_framesize", c_uint), + ("min_framesize", c_uint), + ("max_framesize", c_uint), + ("sample_rate", c_uint), + ("channels", c_uint), + ("bits_per_sample", c_uint), + ("total_samples", FLAC__uint64), + ("md5sum", FLAC__byte*16)] + + FLAC__STREAM_METADATA_STREAMINFO_MIN_BLOCK_SIZE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_MIN_BLOCK_SIZE_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_MAX_BLOCK_SIZE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_MAX_BLOCK_SIZE_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_MIN_FRAME_SIZE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_MIN_FRAME_SIZE_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_MAX_FRAME_SIZE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_MAX_FRAME_SIZE_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_SAMPLE_RATE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_SAMPLE_RATE_LEN") + + + FLAC__STREAM_METADATA_STREAMINFO_CHANNELS_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_CHANNELS_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_BITS_PER_SAMPLE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_BITS_PER_SAMPLE_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_TOTAL_SAMPLES_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_TOTAL_SAMPLES_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_MD5SUM_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_STREAMINFO_MD5SUM_LEN") + + FLAC__STREAM_METADATA_STREAMINFO_LENGTH =(34) + + + class FLAC__StreamMetadata_Padding(Structure): + _fields_ = [("dummy", c_int)] + + + + class FLAC__StreamMetadata_Application(Structure): + _fields_ = [("id", FLAC__byte*4), + ("data", FLAC__byte_p)] + + FLAC__STREAM_METADATA_APPLICATION_ID_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_APPLICATION_ID_LEN") + + + class FLAC__StreamMetadata_SeekPoint(Structure): + _fields_ = [("sample_number", FLAC__uint64), + ("stream_offset", FLAC__uint64), + ("frame_samples", c_uint)] + + FLAC__STREAM_METADATA_SEEKPOINT_SAMPLE_NUMBER_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_SEEKPOINT_SAMPLE_NUMBER_LEN") + + FLAC__STREAM_METADATA_SEEKPOINT_STREAM_OFFSET_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_SEEKPOINT_STREAM_OFFSET_LEN") + + FLAC__STREAM_METADATA_SEEKPOINT_FRAME_SAMPLES_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_SEEKPOINT_FRAME_SAMPLES_LEN") + + FLAC__STREAM_METADATA_SEEKPOINT_LENGTH =(18) + + + FLAC__STREAM_METADATA_SEEKPOINT_PLACEHOLDER = FLAC__uint64.in_dll(libflac, "FLAC__STREAM_METADATA_SEEKPOINT_PLACEHOLDER") + + class 
FLAC__StreamMetadata_SeekTable(Structure): + _fields_ = [("num_points", c_uint), + ("points", POINTER(FLAC__StreamMetadata_SeekPoint))] + + class FLAC__StreamMetadata_VorbisComment_Entry(Structure): + _fields_ = [("length", FLAC__uint32), + ("entry", FLAC__byte_p)] + + FLAC__STREAM_METADATA_VORBIS_COMMENT_ENTRY_LENGTH_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_VORBIS_COMMENT_ENTRY_LENGTH_LEN") + + + class FLAC__StreamMetadata_VorbisComment(Structure): + _fields_ = [("vendor_string", FLAC__StreamMetadata_VorbisComment_Entry), + ("num_comments", FLAC__uint32), + ("comments", POINTER(FLAC__StreamMetadata_VorbisComment_Entry))] + + FLAC__STREAM_METADATA_VORBIS_COMMENT_NUM_COMMENTS_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_VORBIS_COMMENT_NUM_COMMENTS_LEN") + + + class FLAC__StreamMetadata_CueSheet_Index(Structure): + _fields_ = [("offset", FLAC__uint64), + ("number", FLAC__byte)] + + + FLAC__STREAM_METADATA_CUESHEET_INDEX_OFFSET_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_INDEX_OFFSET_LEN") + + FLAC__STREAM_METADATA_CUESHEET_INDEX_NUMBER_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_INDEX_NUMBER_LEN") + + FLAC__STREAM_METADATA_CUESHEET_INDEX_RESERVED_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_INDEX_RESERVED_LEN") + + + class FLAC__StreamMetadata_CueSheet_Track(Structure): + _fields_ = [("offset", FLAC__uint64), + ("number", FLAC__byte), + ("isrc", c_char*13), + ("type", c_uint), + ("pre_emphasis", c_uint), + ("num_indices", FLAC__byte), + ("indices", POINTER(FLAC__StreamMetadata_CueSheet_Index))] + + FLAC__STREAM_METADATA_CUESHEET_TRACK_OFFSET_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_OFFSET_LEN") + + FLAC__STREAM_METADATA_CUESHEET_TRACK_NUMBER_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_NUMBER_LEN") + + FLAC__STREAM_METADATA_CUESHEET_TRACK_ISRC_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_ISRC_LEN") + + FLAC__STREAM_METADATA_CUESHEET_TRACK_TYPE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_TYPE_LEN") + + FLAC__STREAM_METADATA_CUESHEET_TRACK_PRE_EMPHASIS_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_PRE_EMPHASIS_LEN") + + FLAC__STREAM_METADATA_CUESHEET_TRACK_RESERVED_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_RESERVED_LEN") + + FLAC__STREAM_METADATA_CUESHEET_TRACK_NUM_INDICES_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_TRACK_NUM_INDICES_LEN") + + + class FLAC__StreamMetadata_CueSheet(Structure): + _fields_ = [("media_catalog_number", c_char*129), + ("lead_in", FLAC__uint64), + ("is_cd", FLAC__bool), + ("num_tracks", c_uint), + ("tracks", POINTER(FLAC__StreamMetadata_CueSheet_Track))] + + FLAC__STREAM_METADATA_CUESHEET_MEDIA_CATALOG_NUMBER_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_MEDIA_CATALOG_NUMBER_LEN") + + + FLAC__STREAM_METADATA_CUESHEET_LEAD_IN_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_LEAD_IN_LEN") + + FLAC__STREAM_METADATA_CUESHEET_IS_CD_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_IS_CD_LEN") + + FLAC__STREAM_METADATA_CUESHEET_RESERVED_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_RESERVED_LEN") + + FLAC__STREAM_METADATA_CUESHEET_NUM_TRACKS_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_CUESHEET_NUM_TRACKS_LEN") + + + FLAC__StreamMetadata_Picture_Type = c_int + FLAC__STREAM_METADATA_PICTURE_TYPE_OTHER = 0 + FLAC__STREAM_METADATA_PICTURE_TYPE_FILE_ICON_STANDARD = 1 + 
FLAC__STREAM_METADATA_PICTURE_TYPE_FILE_ICON = 2 + FLAC__STREAM_METADATA_PICTURE_TYPE_FRONT_COVER = 3 + FLAC__STREAM_METADATA_PICTURE_TYPE_BACK_COVER = 4 + FLAC__STREAM_METADATA_PICTURE_TYPE_LEAFLET_PAGE = 5 + FLAC__STREAM_METADATA_PICTURE_TYPE_MEDIA = 6 + FLAC__STREAM_METADATA_PICTURE_TYPE_LEAD_ARTIST = 7 + FLAC__STREAM_METADATA_PICTURE_TYPE_ARTIST = 8 + FLAC__STREAM_METADATA_PICTURE_TYPE_CONDUCTOR = 9 + FLAC__STREAM_METADATA_PICTURE_TYPE_BAND = 10 + FLAC__STREAM_METADATA_PICTURE_TYPE_COMPOSER = 11 + FLAC__STREAM_METADATA_PICTURE_TYPE_LYRICIST = 12 + FLAC__STREAM_METADATA_PICTURE_TYPE_RECORDING_LOCATION = 13 + FLAC__STREAM_METADATA_PICTURE_TYPE_DURING_RECORDING = 14 + FLAC__STREAM_METADATA_PICTURE_TYPE_DURING_PERFORMANCE = 15 + FLAC__STREAM_METADATA_PICTURE_TYPE_VIDEO_SCREEN_CAPTURE = 16 + FLAC__STREAM_METADATA_PICTURE_TYPE_FISH = 17 + FLAC__STREAM_METADATA_PICTURE_TYPE_ILLUSTRATION = 18 + FLAC__STREAM_METADATA_PICTURE_TYPE_BAND_LOGOTYPE = 19 + FLAC__STREAM_METADATA_PICTURE_TYPE_PUBLISHER_LOGOTYPE = 20 + + + libflac.FLAC__StreamMetadata_Picture_TypeString.restype = c_char_p + libflac.FLAC__StreamMetadata_Picture_TypeString.argtypes = [] + + def FLAC__StreamMetadata_Picture_TypeString(): + return libflac.FLAC__StreamMetadata_Picture_TypeString() + + + class FLAC__StreamMetadata_Picture(Structure): + _fields_ = [("type", FLAC__StreamMetadata_Picture_Type), + ("mime_type", c_char_p), + ("description", FLAC__byte_p), + ("width", FLAC__uint32), + ("height", FLAC__uint32), + ("depth", FLAC__uint32), + ("colors", FLAC__uint32), + ("data_length", FLAC__uint32), + ("data", FLAC__byte)] + + FLAC__STREAM_METADATA_PICTURE_TYPE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_TYPE_LEN") + + FLAC__STREAM_METADATA_PICTURE_MIME_TYPE_LENGTH_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_MIME_TYPE_LENGTH_LEN") + + FLAC__STREAM_METADATA_PICTURE_DESCRIPTION_LENGTH_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_DESCRIPTION_LENGTH_LEN") + + FLAC__STREAM_METADATA_PICTURE_WIDTH_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_WIDTH_LEN") + + FLAC__STREAM_METADATA_PICTURE_HEIGHT_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_HEIGHT_LEN") + + + FLAC__STREAM_METADATA_PICTURE_DEPTH_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_DEPTH_LEN") + + FLAC__STREAM_METADATA_PICTURE_COLORS_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_COLORS_LEN") + + FLAC__STREAM_METADATA_PICTURE_DATA_LENGTH_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_PICTURE_DATA_LENGTH_LEN") + + + class FLAC__StreamMetadata_Unknown(Structure): + _fields_ = [("data", FLAC__byte_p)] + + + class FLAC__StreamMetadata_data(Union): + _fields_ = [("stream_info", FLAC__StreamMetadata_StreamInfo), + ("padding", FLAC__StreamMetadata_Padding), + ("application", FLAC__StreamMetadata_Application), + ("seek_table", FLAC__StreamMetadata_SeekTable), + ("vorbis_comment", FLAC__StreamMetadata_VorbisComment), + ("cue_sheet", FLAC__StreamMetadata_CueSheet), + ("picture", FLAC__StreamMetadata_Picture), + ("unknown", FLAC__StreamMetadata_Unknown)] + + class FLAC__StreamMetadata(Structure): + _fields_ = [("type", FLAC__MetadataType), + ("is_last", FLAC__bool), + ("length", c_uint), + ("data", FLAC__StreamMetadata_data)] + + FLAC__STREAM_METADATA_IS_LAST_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_IS_LAST_LEN") + + FLAC__STREAM_METADATA_TYPE_LEN = c_uint.in_dll(libflac, "FLAC__STREAM_METADATA_TYPE_LEN") + + FLAC__STREAM_METADATA_LENGTH_LEN = c_uint.in_dll(libflac, 
"FLAC__STREAM_METADATA_LENGTH_LEN") + + FLAC__STREAM_METADATA_HEADER_LENGTH =(4) + + + + libflac.FLAC__format_sample_rate_is_valid.restype = FLAC__bool + libflac.FLAC__format_sample_rate_is_valid.argtypes = [c_uint] + + def FLAC__format_sample_rate_is_valid(sample_rate): + return libflac.FLAC__format_sample_rate_is_valid(sample_rate) + + + libflac.FLAC__format_blocksize_is_subset.restype = FLAC__bool + libflac.FLAC__format_blocksize_is_subset.argtypes = [c_uint, c_uint] + + def FLAC__format_blocksize_is_subset(blocksize, sample_rate): + return libflac.FLAC__format_blocksize_is_subset(blocksize, sample_rate) + + + libflac.FLAC__format_sample_rate_is_subset.restype = FLAC__bool + libflac.FLAC__format_sample_rate_is_subset.argtypes = [c_uint] + + def FLAC__format_sample_rate_is_subset(sample_rate): + return libflac.FLAC__format_sample_rate_is_subset(sample_rate) + + + libflac.FLAC__format_vorbiscomment_entry_name_is_legal.restype = FLAC__bool + libflac.FLAC__format_vorbiscomment_entry_name_is_legal.argtypes = [c_char_p] + + def FLAC__format_vorbiscomment_entry_name_is_legal(name): + return libflac.FLAC__format_vorbiscomment_entry_name_is_legal(name) + + libflac.FLAC__format_vorbiscomment_entry_value_is_legal.restype = FLAC__bool + libflac.FLAC__format_vorbiscomment_entry_value_is_legal.argtypes = [FLAC__byte_p, c_uint] + + def FLAC__format_vorbiscomment_entry_value_is_legal(value, length): + return libflac.FLAC__format_vorbiscomment_entry_value_is_legal(value, length) + + libflac.FLAC__format_vorbiscomment_entry_is_legal.restype = FLAC__bool + libflac.FLAC__format_vorbiscomment_entry_is_legal.argtypes = [FLAC__byte_p, c_uint] + + def FLAC__format_vorbiscomment_entry_is_legal(entry, length): + return libflac.FLAC__format_vorbiscomment_entry_is_legal(entry, length) + + libflac.FLAC__format_seektable_is_legal.restype = FLAC__bool + libflac.FLAC__format_seektable_is_legal.argtypes = [POINTER(FLAC__StreamMetadata_SeekTable)] + + def FLAC__format_seektable_is_legal(seek_table): + return libflac.FLAC__format_seektable_is_legal(seek_table) + + + libflac.FLAC__format_seektable_sort.restype = FLAC__bool + libflac.FLAC__format_seektable_sort.argtypes = [POINTER(FLAC__StreamMetadata_SeekTable)] + + def FLAC__format_seektable_sort(seek_table): + return libflac.FLAC__format_seektable_sort(seek_table) + + libflac.FLAC__format_cuesheet_is_legal.restype = FLAC__bool + libflac.FLAC__format_cuesheet_is_legal.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet), FLAC__bool, c_char_p_p] + + def FLAC__format_cuesheet_is_legal(cue_sheet, check_cd_da_subset, violation): + return libflac.FLAC__format_cuesheet_is_legal(cue_sheet, check_cd_da_subset, violation) + + # /format + + # metadata + + libflac.FLAC__metadata_get_streaminfo.restype = FLAC__bool + libflac.FLAC__metadata_get_streaminfo.argtypes = [c_char_p, POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_get_streaminfo(filename, streaminfo): + return libflac.FLAC__metadata_get_streaminfo(filename, streaminfo) + + libflac.FLAC__metadata_get_tags.restype = FLAC__bool + libflac.FLAC__metadata_get_tags.argtypes = [c_char_p, POINTER(POINTER(FLAC__StreamMetadata))] + + def FLAC__metadata_get_tags(filename, tags): + return libflac.FLAC__metadata_get_tags(filename, tags) + + libflac.FLAC__metadata_get_cuesheet.restype = FLAC__bool + libflac.FLAC__metadata_get_cuesheet.argtypes = [c_char_p, POINTER(POINTER(FLAC__StreamMetadata))] + + def FLAC__metadata_get_cuesheet(filename, cuesheet): + return libflac.FLAC__metadata_get_cuesheet(filename, cuesheet) + + 
libflac.FLAC__metadata_get_picture.restype = FLAC__bool + libflac.FLAC__metadata_get_picture.argtypes = [c_char_p, POINTER(POINTER(FLAC__StreamMetadata)), FLAC__StreamMetadata_Picture_Type, c_char_p, FLAC__byte_p, c_uint, c_uint, c_uint, c_uint] + + def FLAC__metadata_get_picture(filename, picture, type, mime_type, description, max_width, max_height, max_depth, max_colors): + return libflac.FLAC__metadata_get_picture(filename, picture, type, mime_type, description, max_width, max_height, max_depth, max_colors) + + + class FLAC__Metadata_SimpleIterator(Structure): + _fields_ = [("dummy", c_int)] + + FLAC__Metadata_SimpleIteratorStatus = c_int + + FLAC__METADATA_SIMPLE_ITERATOR_STATUS_OK = 0 + + + libflac.FLAC__Metadata_SimpleIteratorStatusString.restype = c_char_p + libflac.FLAC__Metadata_SimpleIteratorStatusString.argtypes = [] + + def FLAC__Metadata_SimpleIteratorStatusString(): + return libflac.FLAC__Metadata_SimpleIteratorStatusString() + + + libflac.FLAC__metadata_simple_iterator_new.restype = POINTER(FLAC__Metadata_SimpleIterator) + libflac.FLAC__metadata_simple_iterator_new.argtypes = [] + + def FLAC__metadata_simple_iterator_new(): + return libflac.FLAC__metadata_simple_iterator_new() + + + libflac.FLAC__metadata_simple_iterator_delete.restype = None + libflac.FLAC__metadata_simple_iterator_delete.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_delete(iterator): + return libflac.FLAC__metadata_simple_iterator_delete(iterator) + + + libflac.FLAC__metadata_simple_iterator_status.restype = FLAC__Metadata_SimpleIteratorStatus + libflac.FLAC__metadata_simple_iterator_status.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_status(iterator): + return libflac.FLAC__metadata_simple_iterator_status(iterator) + + libflac.FLAC__metadata_simple_iterator_init.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_init.argtypes = [POINTER(FLAC__Metadata_SimpleIterator), c_char_p, FLAC__bool, FLAC__bool] + + def FLAC__metadata_simple_iterator_init(iterator, filename, read_only, preserve_file_stats): + return libflac.FLAC__metadata_simple_iterator_init(iterator, filename, read_only, preserve_file_stats) + + libflac.FLAC__metadata_simple_iterator_is_writable.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_is_writable.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_is_writable(iterator): + return libflac.FLAC__metadata_simple_iterator_is_writable(iterator) + + libflac.FLAC__metadata_simple_iterator_next.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_next.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_next(iterator): + return libflac.FLAC__metadata_simple_iterator_next(iterator) + + libflac.FLAC__metadata_simple_iterator_prev.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_prev.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_prev(iterator): + return libflac.FLAC__metadata_simple_iterator_prev(iterator) + + libflac.FLAC__metadata_simple_iterator_is_last.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_is_last.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_is_last(iterator): + return libflac.FLAC__metadata_simple_iterator_is_last(iterator) + + libflac.FLAC__metadata_simple_iterator_get_block_offset.restype = c_off_t + libflac.FLAC__metadata_simple_iterator_get_block_offset.argtypes = 
[POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_get_block_offset(iterator): + return libflac.FLAC__metadata_simple_iterator_get_block_offset(iterator) + + libflac.FLAC__metadata_simple_iterator_get_block_type.restype = FLAC__MetadataType + libflac.FLAC__metadata_simple_iterator_get_block_type.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_get_block_type(iterator): + return libflac.FLAC__metadata_simple_iterator_get_block_type(iterator) + + libflac.FLAC__metadata_simple_iterator_get_block_length.restype = c_uint + libflac.FLAC__metadata_simple_iterator_get_block_length.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_get_block_length(iterator): + return libflac.FLAC__metadata_simple_iterator_get_block_length(iterator) + + libflac.FLAC__metadata_simple_iterator_get_application_id.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_get_application_id.argtypes = [POINTER(FLAC__Metadata_SimpleIterator), FLAC__byte_p] + + def FLAC__metadata_simple_iterator_get_application_id(iterator, id): + return libflac.FLAC__metadata_simple_iterator_get_application_id(iterator, id) + + libflac.FLAC__metadata_simple_iterator_get_block.restype = POINTER(FLAC__StreamMetadata) + libflac.FLAC__metadata_simple_iterator_get_block.argtypes = [POINTER(FLAC__Metadata_SimpleIterator)] + + def FLAC__metadata_simple_iterator_get_block(iterator): + return libflac.FLAC__metadata_simple_iterator_get_block(iterator) + + libflac.FLAC__metadata_simple_iterator_set_block.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_set_block.argtypes = [POINTER(FLAC__Metadata_SimpleIterator), POINTER(FLAC__StreamMetadata), FLAC__bool] + + def FLAC__metadata_simple_iterator_set_block(iterator, block, use_padding): + return libflac.FLAC__metadata_simple_iterator_set_block(iterator, block, use_padding) + + libflac.FLAC__metadata_simple_iterator_insert_block_after.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_insert_block_after.argtypes = [POINTER(FLAC__Metadata_SimpleIterator), POINTER(FLAC__StreamMetadata), FLAC__bool] + + def FLAC__metadata_simple_iterator_insert_block_after(iterator, block, use_padding): + return libflac.FLAC__metadata_simple_iterator_insert_block_after(iterator, block, use_padding) + + libflac.FLAC__metadata_simple_iterator_delete_block.restype = FLAC__bool + libflac.FLAC__metadata_simple_iterator_delete_block.argtypes = [POINTER(FLAC__Metadata_SimpleIterator), FLAC__bool] + + def FLAC__metadata_simple_iterator_delete_block(iterator, use_padding): + return libflac.FLAC__metadata_simple_iterator_delete_block(iterator, use_padding) + + class FLAC__Metadata_Chain(Structure): + _fields_ = [("dummy", c_int)] + + class FLAC__Metadata_Iterator(Structure): + _fields_ = [("dummy", c_int)] + + FLAC__Metadata_ChainStatus = c_int + + FLAC__METADATA_CHAIN_STATUS_OK = 0 + + libflac.FLAC__Metadata_ChainStatusString.restype = c_char_p + libflac.FLAC__Metadata_ChainStatusString.argtypes = [] + + def FLAC__Metadata_ChainStatusString(): + return libflac.FLAC__Metadata_ChainStatusString() + + libflac.FLAC__metadata_chain_new.restype = POINTER(FLAC__Metadata_Chain) + libflac.FLAC__metadata_chain_new.argtypes = [] + + def FLAC__metadata_chain_new(): + return libflac.FLAC__metadata_chain_new() + + libflac.FLAC__metadata_chain_delete.restype = None + libflac.FLAC__metadata_chain_delete.argtypes = [POINTER(FLAC__Metadata_Chain)] + + def FLAC__metadata_chain_delete(chain): + return 
libflac.FLAC__metadata_chain_delete(chain) + + libflac.FLAC__metadata_chain_status.restype = FLAC__Metadata_ChainStatus + libflac.FLAC__metadata_chain_status.argtypes = [POINTER(FLAC__Metadata_Chain)] + + def FLAC__metadata_chain_status(chain): + return libflac.FLAC__metadata_chain_status(chain) + + libflac.FLAC__metadata_chain_read.restype = FLAC__bool + libflac.FLAC__metadata_chain_read.argtypes = [POINTER(FLAC__Metadata_Chain), c_char_p] + + def FLAC__metadata_chain_read(chain, filename): + return libflac.FLAC__metadata_chain_read(chain, filename) + + libflac.FLAC__metadata_chain_read_ogg.restype = FLAC__bool + libflac.FLAC__metadata_chain_read_ogg.argtypes = [POINTER(FLAC__Metadata_Chain), c_char_p] + + def FLAC__metadata_chain_read_ogg(chain, filename): + return libflac.FLAC__metadata_chain_read_ogg(chain, filename) + + libflac.FLAC__metadata_chain_read_with_callbacks.restype = FLAC__bool + libflac.FLAC__metadata_chain_read_with_callbacks.argtypes = [POINTER(FLAC__Metadata_Chain), FLAC__IOHandle, FLAC__IOCallbacks] + + def FLAC__metadata_chain_read_with_callbacks(chain, handle, callbacks): + return libflac.FLAC__metadata_chain_read_with_callbacks(chain, handle, callbacks) + + libflac.FLAC__metadata_chain_read_ogg_with_callbacks.restype = FLAC__bool + libflac.FLAC__metadata_chain_read_ogg_with_callbacks.argtypes = [POINTER(FLAC__Metadata_Chain), FLAC__IOHandle, FLAC__IOCallbacks] + + def FLAC__metadata_chain_read_ogg_with_callbacks(chain, handle, callbacks): + return libflac.FLAC__metadata_chain_read_ogg_with_callbacks(chain, handle, callbacks) + + libflac.FLAC__metadata_chain_check_if_tempfile_needed.restype = FLAC__bool + libflac.FLAC__metadata_chain_check_if_tempfile_needed.argtypes = [POINTER(FLAC__Metadata_Chain), FLAC__bool] + + def FLAC__metadata_chain_check_if_tempfile_needed(chain, use_padding): + return libflac.FLAC__metadata_chain_check_if_tempfile_needed(chain, use_padding) + + libflac.FLAC__metadata_chain_write.restype = FLAC__bool + libflac.FLAC__metadata_chain_write.argtypes = [POINTER(FLAC__Metadata_Chain), FLAC__bool, FLAC__bool] + + def FLAC__metadata_chain_write(chain, use_padding, preserve_file_stats): + return libflac.FLAC__metadata_chain_write(chain, use_padding, preserve_file_stats) + + libflac.FLAC__metadata_chain_write_with_callbacks.restype = FLAC__bool + libflac.FLAC__metadata_chain_write_with_callbacks.argtypes = [POINTER(FLAC__Metadata_Chain), FLAC__bool, FLAC__IOHandle, FLAC__IOCallbacks] + + def FLAC__metadata_chain_write_with_callbacks(chain, use_padding, handle, callbacks): + return libflac.FLAC__metadata_chain_write_with_callbacks(chain, use_padding, handle, callbacks) + + libflac.FLAC__metadata_chain_write_with_callbacks_and_tempfile.restype = FLAC__bool + libflac.FLAC__metadata_chain_write_with_callbacks_and_tempfile.argtypes = [POINTER(FLAC__Metadata_Chain), FLAC__bool, FLAC__IOHandle, FLAC__IOCallbacks, FLAC__IOHandle, FLAC__IOCallbacks] + + def FLAC__metadata_chain_write_with_callbacks_and_tempfile(chain, use_padding, handle, callbacks, temp_handle, temp_callbacks): + return libflac.FLAC__metadata_chain_write_with_callbacks_and_tempfile(chain, use_padding, handle, callbacks, temp_handle, temp_callbacks) + + libflac.FLAC__metadata_chain_merge_padding.restype = None + libflac.FLAC__metadata_chain_merge_padding.argtypes = [POINTER(FLAC__Metadata_Chain)] + + def FLAC__metadata_chain_merge_padding(chain): + return libflac.FLAC__metadata_chain_merge_padding(chain) + + libflac.FLAC__metadata_chain_sort_padding.restype = None + 
libflac.FLAC__metadata_chain_sort_padding.argtypes = [POINTER(FLAC__Metadata_Chain)] + + def FLAC__metadata_chain_sort_padding(chain): + return libflac.FLAC__metadata_chain_sort_padding(chain) + + libflac.FLAC__metadata_iterator_new.restype = POINTER(FLAC__Metadata_Iterator) + libflac.FLAC__metadata_iterator_new.argtypes = [] + + def FLAC__metadata_iterator_new(): + return libflac.FLAC__metadata_iterator_new() + + libflac.FLAC__metadata_iterator_delete.restype = None + libflac.FLAC__metadata_iterator_delete.argtypes = [POINTER(FLAC__Metadata_Iterator)] + + def FLAC__metadata_iterator_delete(iterator): + return libflac.FLAC__metadata_iterator_delete(iterator) + + libflac.FLAC__metadata_iterator_init.restype = None + libflac.FLAC__metadata_iterator_init.argtypes = [POINTER(FLAC__Metadata_Iterator), POINTER(FLAC__Metadata_Chain)] + + def FLAC__metadata_iterator_init(iterator, chain): + return libflac.FLAC__metadata_iterator_init(iterator, chain) + + libflac.FLAC__metadata_iterator_next.restype = FLAC__bool + libflac.FLAC__metadata_iterator_next.argtypes = [POINTER(FLAC__Metadata_Iterator)] + + def FLAC__metadata_iterator_next(iterator): + return libflac.FLAC__metadata_iterator_next(iterator) + + libflac.FLAC__metadata_iterator_prev.restype = FLAC__bool + libflac.FLAC__metadata_iterator_prev.argtypes = [POINTER(FLAC__Metadata_Iterator)] + + def FLAC__metadata_iterator_prev(iterator): + return libflac.FLAC__metadata_iterator_prev(iterator) + + libflac.FLAC__metadata_iterator_get_block_type.restype = FLAC__MetadataType + libflac.FLAC__metadata_iterator_get_block_type.argtypes = [POINTER(FLAC__Metadata_Iterator)] + + def FLAC__metadata_iterator_get_block_type(iterator): + return libflac.FLAC__metadata_iterator_get_block_type(iterator) + + libflac.FLAC__metadata_iterator_get_block_type.restype = POINTER(FLAC__StreamMetadata) + libflac.FLAC__metadata_iterator_get_block_type.argtypes = [POINTER(FLAC__Metadata_Iterator)] + + def FLAC__metadata_iterator_get_block_type(iterator): + return libflac.FLAC__metadata_iterator_get_block_type(iterator) + + libflac.FLAC__metadata_iterator_set_block.restype = FLAC__bool + libflac.FLAC__metadata_iterator_set_block.argtypes = [POINTER(FLAC__Metadata_Iterator), POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_iterator_set_block(iterator, block): + return libflac.FLAC__metadata_iterator_set_block(iterator, block) + + libflac.FLAC__metadata_iterator_delete_block.restype = FLAC__bool + libflac.FLAC__metadata_iterator_delete_block.argtypes = [POINTER(FLAC__Metadata_Iterator), FLAC__bool] + + def FLAC__metadata_iterator_delete_block(iterator, replace_with_padding): + return libflac.FLAC__metadata_iterator_delete_block(iterator, replace_with_padding) + + libflac.FLAC__metadata_iterator_insert_block_before.restype = FLAC__bool + libflac.FLAC__metadata_iterator_insert_block_before.argtypes = [POINTER(FLAC__Metadata_Iterator), POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_iterator_insert_block_before(iterator, block): + return libflac.FLAC__metadata_iterator_insert_block_before(iterator, block) + + libflac.FLAC__metadata_iterator_insert_block_after.restype = FLAC__bool + libflac.FLAC__metadata_iterator_insert_block_after.argtypes = [POINTER(FLAC__Metadata_Iterator), POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_iterator_insert_block_after(iterator, block): + return libflac.FLAC__metadata_iterator_insert_block_after(iterator, block) + + libflac.FLAC__metadata_object_new.restype = POINTER(FLAC__StreamMetadata) + libflac.FLAC__metadata_object_new.argtypes 
= [POINTER(FLAC__MetadataType)] + + def FLAC__metadata_object_new(type): + return libflac.FLAC__metadata_object_new(type) + + libflac.FLAC__metadata_object_clone.restype = POINTER(FLAC__StreamMetadata) + libflac.FLAC__metadata_object_clone.argtypes = [POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_object_clone(object): + return libflac.FLAC__metadata_object_clone(object) + + libflac.FLAC__metadata_object_delete.restype = None + libflac.FLAC__metadata_object_delete.argtypes = [POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_object_delete(object): + return libflac.FLAC__metadata_object_delete(object) + + libflac.FLAC__metadata_object_is_equal.restype = FLAC__bool + libflac.FLAC__metadata_object_is_equal.argtypes = [POINTER(FLAC__StreamMetadata), POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_object_is_equal(block1, block2): + return libflac.FLAC__metadata_object_is_equal(block1, block2) + + libflac.FLAC__metadata_object_application_set_data.restype = FLAC__bool + libflac.FLAC__metadata_object_application_set_data.argtypes = [POINTER(FLAC__StreamMetadata), FLAC__byte_p, c_uint, FLAC__bool] + + def FLAC__metadata_object_application_set_data(object, data, length, copy): + return libflac.FLAC__metadata_object_application_set_data(object, data, length, copy) + + libflac.FLAC__metadata_object_seektable_resize_points.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_resize_points.argtypes = [POINTER(FLAC__StreamMetadata),c_uint] + + def FLAC__metadata_object_seektable_resize_points(object, new_num_points): + return libflac.FLAC__metadata_object_seektable_resize_points(object, new_num_points) + + libflac.FLAC__metadata_object_seektable_set_point.restype = None + libflac.FLAC__metadata_object_seektable_set_point.argtypes = [POINTER(FLAC__StreamMetadata),c_uint, FLAC__StreamMetadata_SeekPoint] + + def FLAC__metadata_object_seektable_set_point(object, point_num, point): + return libflac.FLAC__metadata_object_seektable_set_point(object, point_num, point) + + libflac.FLAC__metadata_object_seektable_insert_point.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_insert_point.argtypes = [POINTER(FLAC__StreamMetadata),c_uint, FLAC__StreamMetadata_SeekPoint] + + def FLAC__metadata_object_seektable_insert_point(object, point_num, point): + return libflac.FLAC__metadata_object_seektable_insert_point(object, point_num, point) + + libflac.FLAC__metadata_object_seektable_delete_point.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_delete_point.argtypes = [POINTER(FLAC__StreamMetadata),c_uint] + + def FLAC__metadata_object_seektable_delete_point(object, point_num): + return libflac.FLAC__metadata_object_seektable_delete_point(object, point_num) + + libflac.FLAC__metadata_object_seektable_is_legal.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_is_legal.argtypes = [POINTER(FLAC__StreamMetadata)] + + def FLAC__metadata_object_seektable_is_legal(object): + return libflac.FLAC__metadata_object_seektable_is_legal(object) + + libflac.FLAC__metadata_object_seektable_template_append_placeholders.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_template_append_placeholders.argtypes = [POINTER(FLAC__StreamMetadata), c_uint] + + def FLAC__metadata_object_seektable_template_append_placeholders(object, num): + return libflac.FLAC__metadata_object_seektable_template_append_placeholders(object, num) + + libflac.FLAC__metadata_object_seektable_template_append_point.restype = FLAC__bool + 
libflac.FLAC__metadata_object_seektable_template_append_point.argtypes = [POINTER(FLAC__StreamMetadata), FLAC__uint64] + + def FLAC__metadata_object_seektable_template_append_point(object, sample_number): + return libflac.FLAC__metadata_object_seektable_template_append_point(object, sample_number) + + libflac.FLAC__metadata_object_seektable_template_append_points.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_template_append_points.argtypes = [POINTER(FLAC__StreamMetadata), POINTER(FLAC__uint64*0), c_uint] + + def FLAC__metadata_object_seektable_template_append_points(object, sample_numbers, num): + return libflac.FLAC__metadata_object_seektable_template_append_points(object, sample_numbers, num) + + libflac.FLAC__metadata_object_seektable_template_append_spaced_points.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_template_append_spaced_points.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, FLAC__uint64] + + def FLAC__metadata_object_seektable_template_append_spaced_points(object, num, total_samples): + return libflac.FLAC__metadata_object_seektable_template_append_spaced_points(object, num, total_samples) + + libflac.FLAC__metadata_object_seektable_template_append_spaced_points_by_samples.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_template_append_spaced_points_by_samples.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, FLAC__uint64] + + def FLAC__metadata_object_seektable_template_append_spaced_points_by_samples(object, samples, total_samples): + return libflac.FLAC__metadata_object_seektable_template_append_spaced_points_by_samples(object, samples, total_samples) + + libflac.FLAC__metadata_object_seektable_template_sort.restype = FLAC__bool + libflac.FLAC__metadata_object_seektable_template_sort.argtypes = [POINTER(FLAC__StreamMetadata), FLAC__bool] + + def FLAC__metadata_object_seektable_template_sort(object, compact): + return libflac.FLAC__metadata_object_seektable_template_sort(object, compact) + + libflac.FLAC__metadata_object_vorbiscomment_set_vendor_string.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_set_vendor_string.argtypes = [POINTER(FLAC__StreamMetadata), FLAC__StreamMetadata_VorbisComment_Entry, FLAC__bool] + + def FLAC__metadata_object_vorbiscomment_set_vendor_string(object, entry, copy): + return libflac.FLAC__metadata_object_vorbiscomment_set_vendor_string(object, entry, copy) + + libflac.FLAC__metadata_object_vorbiscomment_resize_comments.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_resize_comments.argtypes = [POINTER(FLAC__StreamMetadata), c_uint] + + def FLAC__metadata_object_vorbiscomment_resize_comments(object, new_num_comments): + return libflac.FLAC__metadata_object_vorbiscomment_resize_comments(object, new_num_comments) + + libflac.FLAC__metadata_object_vorbiscomment_set_comment.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_set_comment.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, FLAC__StreamMetadata_VorbisComment_Entry, FLAC__bool] + + def FLAC__metadata_object_vorbiscomment_set_comment(object, comment_num, entry, copy): + return libflac.FLAC__metadata_object_vorbiscomment_set_comment(object, comment_num, entry, copy) + + libflac.FLAC__metadata_object_vorbiscomment_insert_comment.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_insert_comment.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, FLAC__StreamMetadata_VorbisComment_Entry, FLAC__bool] + + def FLAC__metadata_object_vorbiscomment_insert_comment(object, 
comment_num, entry, copy): + return libflac.FLAC__metadata_object_vorbiscomment_insert_comment(object, comment_num, entry, copy) + + libflac.FLAC__metadata_object_vorbiscomment_append_comment.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_append_comment.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, FLAC__StreamMetadata_VorbisComment_Entry, FLAC__bool] + + def FLAC__metadata_object_vorbiscomment_append_comment(object, entry, copy): + return libflac.FLAC__metadata_object_vorbiscomment_append_comment(object,entry, copy) + + libflac.FLAC__metadata_object_vorbiscomment_replace_comment.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_replace_comment.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, FLAC__StreamMetadata_VorbisComment_Entry, FLAC__bool, FLAC__bool] + + def FLAC__metadata_object_vorbiscomment_replace_comment(object, entry, all, copy): + return libflac.FLAC__metadata_object_vorbiscomment_replace_comment(object,entry, all, copy) + + libflac.FLAC__metadata_object_vorbiscomment_delete_comment.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_delete_comment.argtypes = [POINTER(FLAC__StreamMetadata), c_uint] + + def FLAC__metadata_object_vorbiscomment_delete_comment(object, comment_num): + return libflac.FLAC__metadata_object_vorbiscomment_delete_comment(object,comment_num) + + libflac.FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair.argtypes = [POINTER(FLAC__StreamMetadata_VorbisComment_Entry), c_char_p, c_char_p] + + def FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(entry, field_name, field_value): + return libflac.FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(entry, field_name, field_value) + + libflac.FLAC__metadata_object_vorbiscomment_entry_to_name_value_pair.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_entry_to_name_value_pair.argtypes = [POINTER(FLAC__StreamMetadata_VorbisComment_Entry), c_char_p_p, c_char_p_p] + + def FLAC__metadata_object_vorbiscomment_entry_to_name_value_pair(entry, field_name, field_value): + return libflac.FLAC__metadata_object_vorbiscomment_entry_to_name_value_pair(entry, field_name, field_value) + + libflac.FLAC__metadata_object_vorbiscomment_entry_matches.restype = FLAC__bool + libflac.FLAC__metadata_object_vorbiscomment_entry_matches.argtypes = [POINTER(FLAC__StreamMetadata_VorbisComment_Entry), c_char_p, c_uint] + + def FLAC__metadata_object_vorbiscomment_entry_matches(entry, field_name, field_value): + return libflac.FLAC__metadata_object_vorbiscomment_entry_matches(entry, field_name, field_value) + + libflac.FLAC__metadata_object_vorbiscomment_find_entry_from.restype = c_int + libflac.FLAC__metadata_object_vorbiscomment_find_entry_from.argtypes = [POINTER(FLAC__StreamMetadata), c_uint, c_char_p] + + def FLAC__metadata_object_vorbiscomment_find_entry_from(object, offset, field_name): + return libflac.FLAC__metadata_object_vorbiscomment_find_entry_from(object, offset, field_name) + + libflac.FLAC__metadata_object_vorbiscomment_remove_entry_matching.restype = c_int + libflac.FLAC__metadata_object_vorbiscomment_remove_entry_matching.argtypes = [POINTER(FLAC__StreamMetadata), c_char_p] + + def FLAC__metadata_object_vorbiscomment_remove_entry_matching(object, field_name): + return libflac.FLAC__metadata_object_vorbiscomment_remove_entry_matching(object, field_name) + + 
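The Vorbis comment helpers above can be exercised directly. A minimal sketch, assuming libFLAC is installed, that the module is importable as sbapp.pyogg.flac, and that FLAC__StreamMetadata_VorbisComment_Entry is the ctypes structure defined earlier in this module with the same length/entry fields as the C struct:

import ctypes

from sbapp.pyogg import flac

# Build a "NAME=value" comment entry; libFLAC allocates entry.entry itself.
entry = flac.FLAC__StreamMetadata_VorbisComment_Entry()
ok = flac.FLAC__metadata_object_vorbiscomment_entry_from_name_value_pair(
    ctypes.byref(entry), b"TITLE", b"Example track")

if ok:
    # Read the encoded bytes back out of the entry.
    print(ctypes.string_at(entry.entry, entry.length))  # b'TITLE=Example track'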
libflac.FLAC__metadata_object_vorbiscomment_remove_entries_matching.restype = c_int + libflac.FLAC__metadata_object_vorbiscomment_remove_entries_matching.argtypes = [POINTER(FLAC__StreamMetadata), c_char_p] + + def FLAC__metadata_object_vorbiscomment_remove_entries_matching(object, field_name): + return libflac.FLAC__metadata_object_vorbiscomment_remove_entries_matching(object, field_name) + + libflac.FLAC__metadata_object_cuesheet_track_new.restype = POINTER(FLAC__StreamMetadata_CueSheet_Track) + libflac.FLAC__metadata_object_cuesheet_track_new.argtypes = [] + + def FLAC__metadata_object_cuesheet_track_new(): + return libflac.FLAC__metadata_object_cuesheet_track_new() + + libflac.FLAC__metadata_object_cuesheet_track_delete.restype = None + libflac.FLAC__metadata_object_cuesheet_track_delete.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track)] + + def FLAC__metadata_object_cuesheet_track_delete(object): + return libflac.FLAC__metadata_object_cuesheet_track_delete(object) + + libflac.FLAC__metadata_object_cuesheet_track_resize_indices.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_track_resize_indices.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint, c_uint] + + def FLAC__metadata_object_cuesheet_track_resize_indices(object, track_num, new_num_indices): + return libflac.FLAC__metadata_object_cuesheet_track_resize_indices(object, track_num, new_num_indices) + + libflac.FLAC__metadata_object_cuesheet_track_insert_index.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_track_insert_index.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint, c_uint, FLAC__StreamMetadata_CueSheet_Index] + + def FLAC__metadata_object_cuesheet_track_insert_index(object, track_num, index_num, index): + return libflac.FLAC__metadata_object_cuesheet_track_insert_index(object, track_num, index_num, index) + + libflac.FLAC__metadata_object_cuesheet_track_insert_blank_index.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_track_insert_blank_index.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint, c_uint] + + def FLAC__metadata_object_cuesheet_track_insert_blank_index(object, track_num, index_num): + return libflac.FLAC__metadata_object_cuesheet_track_insert_blank_index(object, track_num, index_num) + + libflac.FLAC__metadata_object_cuesheet_track_delete_index.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_track_delete_index.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint, c_uint] + + def FLAC__metadata_object_cuesheet_track_delete_index(object, track_num, index_num): + return libflac.FLAC__metadata_object_cuesheet_track_delete_index(object, track_num, index_num) + + libflac.FLAC__metadata_object_cuesheet_resize_tracks.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_resize_tracks.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint] + + def FLAC__metadata_object_cuesheet_resize_tracks(object, new_num_tracks): + return libflac.FLAC__metadata_object_cuesheet_resize_tracks(object, new_num_tracks) + + libflac.FLAC__metadata_object_cuesheet_set_track.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_set_track.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint, POINTER(FLAC__StreamMetadata_CueSheet_Track), FLAC__bool] + + def FLAC__metadata_object_cuesheet_set_track(object, new_num_tracks, track, copy): + return libflac.FLAC__metadata_object_cuesheet_set_track(object, new_num_tracks, track, copy) + + 
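Every binding in this module follows the same three-step ctypes idiom: declare the C function's return type, declare its argument types, then expose a thin Python wrapper with the same name. The shape of the idiom in isolation, using libc's strlen purely as an illustration (no libFLAC required; works on most Unix-like systems):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))

# 1) declare the return type and 2) the argument types once...
libc.strlen.restype = ctypes.c_size_t
libc.strlen.argtypes = [ctypes.c_char_p]

# 3) ...then wrap the foreign function in a plain Python function.
def strlen(s):
    return libc.strlen(s)

print(strlen(b"flac"))  # 4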
libflac.FLAC__metadata_object_cuesheet_insert_track.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_insert_track.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint, POINTER(FLAC__StreamMetadata_CueSheet_Track), FLAC__bool] + + def FLAC__metadata_object_cuesheet_insert_track(object, track_num, track, copy): + return libflac.FLAC__metadata_object_cuesheet_insert_track(object, track_num, track, copy) + + libflac.FLAC__metadata_object_cuesheet_insert_blank_track.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_insert_blank_track.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint] + + def FLAC__metadata_object_cuesheet_insert_blank_track(object, track_num): + return libflac.FLAC__metadata_object_cuesheet_insert_blank_track(object, track_num) + + libflac.FLAC__metadata_object_cuesheet_delete_track.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_delete_track.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_uint] + + def FLAC__metadata_object_cuesheet_delete_track(object, track_num): + return libflac.FLAC__metadata_object_cuesheet_delete_track(object, track_num) + + libflac.FLAC__metadata_object_cuesheet_is_legal.restype = FLAC__bool + libflac.FLAC__metadata_object_cuesheet_is_legal.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), FLAC__bool, c_char_p_p] + + def FLAC__metadata_object_cuesheet_is_legal(object, check_cd_da_subset, violation): + return libflac.FLAC__metadata_object_cuesheet_is_legal(object, check_cd_da_subset, violation) + + libflac.FLAC__metadata_object_cuesheet_calculate_cddb_id.restype = FLAC__uint32 + libflac.FLAC__metadata_object_cuesheet_calculate_cddb_id.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track)] + + def FLAC__metadata_object_cuesheet_calculate_cddb_id(object): + return libflac.FLAC__metadata_object_cuesheet_calculate_cddb_id(object) + + libflac.FLAC__metadata_object_picture_set_mime_type.restype = FLAC__bool + libflac.FLAC__metadata_object_picture_set_mime_type.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_char_p, FLAC__bool] + + def FLAC__metadata_object_picture_set_mime_type(object, mime_type, copy): + return libflac.FLAC__metadata_object_picture_set_mime_type(object, mime_type, copy) + + libflac.FLAC__metadata_object_picture_set_description.restype = FLAC__bool + libflac.FLAC__metadata_object_picture_set_description.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), FLAC__byte_p, FLAC__bool] + + def FLAC__metadata_object_picture_set_description(object, description, copy): + return libflac.FLAC__metadata_object_picture_set_description(object, description, copy) + + libflac.FLAC__metadata_object_picture_set_data.restype = FLAC__bool + libflac.FLAC__metadata_object_picture_set_data.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), FLAC__byte_p,FLAC__uint32, FLAC__bool] + + def FLAC__metadata_object_picture_set_data(object, data, length, copy): + return libflac.FLAC__metadata_object_picture_set_data(object, data, length, copy) + + libflac.FLAC__metadata_object_picture_is_legal.restype = FLAC__bool + libflac.FLAC__metadata_object_picture_is_legal.argtypes = [POINTER(FLAC__StreamMetadata_CueSheet_Track), c_char_p] + + def FLAC__metadata_object_picture_is_legal(object, violation): + return libflac.FLAC__metadata_object_picture_is_legal(object, violation) + + # /metadata + + # stream_decoder + + FLAC__StreamDecoderState = c_int + FLAC__StreamDecoderStateEnum = ["FLAC__STREAM_DECODER_SEARCH_FOR_METADATA", + "FLAC__STREAM_DECODER_READ_METADATA", +
"FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC", + "FLAC__STREAM_DECODER_READ_FRAME", + "FLAC__STREAM_DECODER_END_OF_STREAM", + "FLAC__STREAM_DECODER_OGG_ERROR", + "FLAC__STREAM_DECODER_SEEK_ERROR", + "FLAC__STREAM_DECODER_ABORTED", + "FLAC__STREAM_DECODER_MEMORY_ALLOCATION_ERROR", + "FLAC__STREAM_DECODER_UNINITIALIZED"] + + libflac.FLAC__StreamDecoderStateString.restype = c_char_p + libflac.FLAC__StreamDecoderStateString.argtypes = [] + + def FLAC__StreamDecoderStateString(): + return libflac.FLAC__StreamDecoderStateString() + + + FLAC__StreamDecoderInitStatus = c_int + FLAC__StreamDecoderInitStatusEnum = ["FLAC__STREAM_DECODER_INIT_STATUS_OK", + "FLAC__STREAM_DECODER_INIT_STATUS_UNSUPPORTED_CONTAINER", + "FLAC__STREAM_DECODER_INIT_STATUS_INVALID_CALLBACKS", + "FLAC__STREAM_DECODER_INIT_STATUS_MEMORY_ALLOCATION_ERROR", + "FLAC__STREAM_DECODER_INIT_STATUS_ERROR_OPENING_FILE", + "FLAC__STREAM_DECODER_INIT_STATUS_ALREADY_INITIALIZED"] + + libflac.FLAC__StreamDecoderInitStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderInitStatusString.argtypes = [] + + def FLAC__StreamDecoderInitStatusString(): + return libflac.FLAC__StreamDecoderInitStatusString() + + + FLAC__StreamDecoderReadStatus = c_int + FLAC__StreamDecoderReadStatusEnum = ["FLAC__STREAM_DECODER_READ_STATUS_CONTINUE", + "FLAC__STREAM_DECODER_READ_STATUS_END_OF_STREAM", + "FLAC__STREAM_DECODER_READ_STATUS_ABORT"] + + libflac.FLAC__StreamDecoderReadStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderReadStatusString.argtypes = [] + + def FLAC__StreamDecoderReadStatusString(): + return libflac.FLAC__StreamDecoderReadStatusString() + + + FLAC__StreamDecoderSeekStatus = c_int + FLAC__StreamDecoderSeekStatusEnum = ["FLAC__STREAM_DECODER_SEEK_STATUS_OK", + "FLAC__STREAM_DECODER_SEEK_STATUS_ERROR", + "FLAC__STREAM_DECODER_SEEK_STATUS_UNSUPPORTED"] + + libflac.FLAC__StreamDecoderSeekStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderSeekStatusString.argtypes = [] + + def FLAC__StreamDecoderSeekStatusString(): + return libflac.FLAC__StreamDecoderSeekStatusString() + + + FLAC__StreamDecoderTellStatus = c_int + FLAC__StreamDecoderTellStatusEnum = ["FLAC__STREAM_DECODER_TELL_STATUS_OK", + "FLAC__STREAM_DECODER_TELL_STATUS_ERROR", + "FLAC__STREAM_DECODER_TELL_STATUS_UNSUPPORTED"] + + libflac.FLAC__StreamDecoderTellStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderTellStatusString.argtypes = [] + + def FLAC__StreamDecoderTellStatusString(): + return libflac.FLAC__StreamDecoderTellStatusString() + + + FLAC__StreamDecoderLengthStatus = c_int + FLAC__StreamDecoderLengthStatusEnum = ["FLAC__STREAM_DECODER_LENGTH_STATUS_OK", + "FLAC__STREAM_DECODER_LENGTH_STATUS_ERROR", + "FLAC__STREAM_DECODER_LENGTH_STATUS_UNSUPPORTED"] + + libflac.FLAC__StreamDecoderLengthStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderLengthStatusString.argtypes = [] + + def FLAC__StreamDecoderLengthStatusString(): + return libflac.FLAC__StreamDecoderLengthStatusString() + + + FLAC__StreamDecoderWriteStatus = c_int + FLAC__StreamDecoderWriteStatusEnum = ["FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE", "FLAC__STREAM_DECODER_WRITE_STATUS_ABORT"] + + libflac.FLAC__StreamDecoderWriteStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderWriteStatusString.argtypes = [] + + def FLAC__StreamDecoderWriteStatusString(): + return libflac.FLAC__StreamDecoderWriteStatusString() + + FLAC__StreamDecoderErrorStatus = c_int + FLAC__StreamDecoderErrorStatusEnum = ["FLAC__STREAM_DECODER_ERROR_STATUS_LOST_SYNC", + 
"FLAC__STREAM_DECODER_ERROR_STATUS_BAD_HEADER", + "FLAC__STREAM_DECODER_ERROR_STATUS_FRAME_CRC_MISMATCH", + "FLAC__STREAM_DECODER_ERROR_STATUS_UNPARSEABLE_STREAM"] + + libflac.FLAC__StreamDecoderErrorStatusString.restype = c_char_p + libflac.FLAC__StreamDecoderErrorStatusString.argtypes = [] + + def FLAC__StreamDecoderErrorStatusString(): + return libflac.FLAC__StreamDecoderErrorStatusString() + + + + class FLAC__StreamDecoderProtected(Structure): + _fields_ = [("dummy", c_int)] + + class FLAC__StreamDecoderPrivate(Structure): + _fields_ = [("dummy", c_int)] + + class FLAC__StreamDecoder(Structure): + _fields_ = [("protected_", POINTER(FLAC__StreamDecoderProtected)), + ("private_", POINTER(FLAC__StreamDecoderPrivate))] + + FLAC__StreamDecoderReadCallback = CFUNCTYPE( + FLAC__StreamDecoderReadStatus, + POINTER(FLAC__StreamDecoder), + POINTER(FLAC__byte*0), + c_size_t_p, + c_void_p + ) + + FLAC__StreamDecoderSeekCallback = CFUNCTYPE( + FLAC__StreamDecoderSeekStatus, + POINTER(FLAC__StreamDecoder), + FLAC__uint64, + c_void_p + ) + + FLAC__StreamDecoderTellCallback = CFUNCTYPE( + FLAC__StreamDecoderTellStatus, + POINTER(FLAC__StreamDecoder), + FLAC__uint64_p, + c_void_p + ) + + FLAC__StreamDecoderLengthCallback = CFUNCTYPE( + FLAC__StreamDecoderLengthStatus, + POINTER(FLAC__StreamDecoder), + FLAC__uint64_p, + c_void_p + ) + + FLAC__StreamDecoderEofCallback = CFUNCTYPE( + FLAC__bool, + POINTER(FLAC__StreamDecoder), + c_void_p + ) + + FLAC__StreamDecoderWriteCallback = CFUNCTYPE( + FLAC__StreamDecoderWriteStatus, + POINTER(FLAC__StreamDecoder), + POINTER(FLAC__Frame), + POINTER(FLAC__int32_p*0), + c_void_p + ) + + FLAC__StreamDecoderMetadataCallback = CFUNCTYPE( + None, + POINTER(FLAC__StreamDecoder), + POINTER(FLAC__StreamMetadata), + c_void_p + ) + + FLAC__StreamDecoderErrorCallback = CFUNCTYPE( + None, + POINTER(FLAC__StreamDecoder), + FLAC__StreamDecoderErrorStatus, + c_void_p + ) + + + libflac.FLAC__stream_decoder_new.restype = POINTER(FLAC__StreamDecoder) + libflac.FLAC__stream_decoder_new.argtypes = [] + + def FLAC__stream_decoder_new(): + return libflac.FLAC__stream_decoder_new() + + libflac.FLAC__stream_decoder_delete.restype = None + libflac.FLAC__stream_decoder_delete.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_delete(decoder): + return libflac.FLAC__stream_decoder_delete(decoder) + + + libflac.FLAC__stream_decoder_set_ogg_serial_number.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_ogg_serial_number.argtypes = [POINTER(FLAC__StreamDecoder), c_long] + + def FLAC__stream_decoder_set_ogg_serial_number(decoder, serial_number): + return libflac.FLAC__stream_decoder_set_ogg_serial_number(decoder, serial_number) + + libflac.FLAC__stream_decoder_set_md5_checking.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_md5_checking.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__bool] + + def FLAC__stream_decoder_set_md5_checking(decoder, value): + return libflac.FLAC__stream_decoder_set_md5_checking(decoder, value) + + libflac.FLAC__stream_decoder_set_metadata_respond.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_metadata_respond.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__MetadataType] + + def FLAC__stream_decoder_set_metadata_respond(decoder, type): + return libflac.FLAC__stream_decoder_set_metadata_respond(decoder, type) + + libflac.FLAC__stream_decoder_set_metadata_respond_application.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_metadata_respond_application.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__byte*4] + + def 
FLAC__stream_decoder_set_metadata_respond_application(decoder, id): + return libflac.FLAC__stream_decoder_set_metadata_respond_application(decoder, id) + + libflac.FLAC__stream_decoder_set_metadata_respond_all.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_metadata_respond_all.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_set_metadata_respond_all(decoder): + return libflac.FLAC__stream_decoder_set_metadata_respond_all(decoder) + + libflac.FLAC__stream_decoder_set_metadata_ignore.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_metadata_ignore.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__MetadataType] + + def FLAC__stream_decoder_set_metadata_ignore(decoder, type): + return libflac.FLAC__stream_decoder_set_metadata_ignore(decoder, type) + + libflac.FLAC__stream_decoder_set_metadata_ignore_application.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_metadata_ignore_application.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__byte*4] + + def FLAC__stream_decoder_set_metadata_ignore_application(decoder, id): + return libflac.FLAC__stream_decoder_set_metadata_ignore_application(decoder, id) + + libflac.FLAC__stream_decoder_set_metadata_ignore_all.restype = FLAC__bool + libflac.FLAC__stream_decoder_set_metadata_ignore_all.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_set_metadata_ignore_all(decoder): + return libflac.FLAC__stream_decoder_set_metadata_ignore_all(decoder) + + libflac.FLAC__stream_decoder_get_state.restype = FLAC__StreamDecoderState + libflac.FLAC__stream_decoder_get_state.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_state(decoder): + return libflac.FLAC__stream_decoder_get_state(decoder) + + libflac.FLAC__stream_decoder_get_resolved_state_string.restype = c_char_p + libflac.FLAC__stream_decoder_get_resolved_state_string.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_resolved_state_string(decoder): + return libflac.FLAC__stream_decoder_get_resolved_state_string(decoder) + + libflac.FLAC__stream_decoder_get_md5_checking.restype = FLAC__bool + libflac.FLAC__stream_decoder_get_md5_checking.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_md5_checking(decoder): + return libflac.FLAC__stream_decoder_get_md5_checking(decoder) + + libflac.FLAC__stream_decoder_get_total_samples.restype = FLAC__uint64 + libflac.FLAC__stream_decoder_get_total_samples.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_total_samples(decoder): + return libflac.FLAC__stream_decoder_get_total_samples(decoder) + + libflac.FLAC__stream_decoder_get_channels.restype = c_uint + libflac.FLAC__stream_decoder_get_channels.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_channels(decoder): + return libflac.FLAC__stream_decoder_get_channels(decoder) + + libflac.FLAC__stream_decoder_get_channel_assignment.restype = FLAC__ChannelAssignment + libflac.FLAC__stream_decoder_get_channel_assignment.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_channel_assignment(decoder): + return libflac.FLAC__stream_decoder_get_channel_assignment(decoder) + + libflac.FLAC__stream_decoder_get_bits_per_sample.restype = c_uint + libflac.FLAC__stream_decoder_get_bits_per_sample.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_bits_per_sample(decoder): + return libflac.FLAC__stream_decoder_get_bits_per_sample(decoder) + + libflac.FLAC__stream_decoder_get_sample_rate.restype = c_uint + 
libflac.FLAC__stream_decoder_get_sample_rate.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_sample_rate(decoder): + return libflac.FLAC__stream_decoder_get_sample_rate(decoder) + + libflac.FLAC__stream_decoder_get_blocksize.restype = c_uint + libflac.FLAC__stream_decoder_get_blocksize.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_get_blocksize(decoder): + return libflac.FLAC__stream_decoder_get_blocksize(decoder) + + libflac.FLAC__stream_decoder_get_decode_position.restype = FLAC__bool + libflac.FLAC__stream_decoder_get_decode_position.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__uint64_p] + + def FLAC__stream_decoder_get_decode_position(decoder, position): + return libflac.FLAC__stream_decoder_get_decode_position(decoder, position) + + libflac.FLAC__stream_decoder_init_stream.restype = FLAC__StreamDecoderInitStatus + libflac.FLAC__stream_decoder_init_stream.argtypes = [POINTER(FLAC__StreamDecoder), + FLAC__StreamDecoderReadCallback, + FLAC__StreamDecoderSeekCallback, + FLAC__StreamDecoderTellCallback, + FLAC__StreamDecoderLengthCallback, + FLAC__StreamDecoderEofCallback, + FLAC__StreamDecoderWriteCallback, + FLAC__StreamDecoderMetadataCallback, + FLAC__StreamDecoderErrorCallback, + c_void_p] + + def FLAC__stream_decoder_init_stream(decoder, read_callback, seek_callback, tell_callback, length_callback, eof_callback, write_callback, metadata_callback, error_callback, client_data): + return libflac.FLAC__stream_decoder_init_stream(decoder, read_callback, seek_callback, tell_callback, length_callback, eof_callback, write_callback, metadata_callback, error_callback, client_data) + + + libflac.FLAC__stream_decoder_init_ogg_stream.restype = FLAC__StreamDecoderInitStatus + libflac.FLAC__stream_decoder_init_ogg_stream.argtypes = [POINTER(FLAC__StreamDecoder), + FLAC__StreamDecoderReadCallback, + FLAC__StreamDecoderSeekCallback, + FLAC__StreamDecoderTellCallback, + FLAC__StreamDecoderLengthCallback, + FLAC__StreamDecoderEofCallback, + FLAC__StreamDecoderWriteCallback, + FLAC__StreamDecoderMetadataCallback, + FLAC__StreamDecoderErrorCallback, + c_void_p] + + def FLAC__stream_decoder_init_ogg_stream(decoder, read_callback, seek_callback, tell_callback, length_callback, eof_callback, write_callback, metadata_callback, error_callback, client_data): + return libflac.FLAC__stream_decoder_init_ogg_stream(decoder, read_callback, seek_callback, tell_callback, length_callback, eof_callback, write_callback, metadata_callback, error_callback, client_data) + + libflac.FLAC__stream_decoder_init_file.restype = FLAC__StreamDecoderInitStatus + libflac.FLAC__stream_decoder_init_file.argtypes = [POINTER(FLAC__StreamDecoder), + c_char_p, + FLAC__StreamDecoderWriteCallback, + FLAC__StreamDecoderMetadataCallback, + FLAC__StreamDecoderErrorCallback, + c_void_p] + + def FLAC__stream_decoder_init_file(decoder, filename, write_callback, metadata_callback, error_callback, client_data): + return libflac.FLAC__stream_decoder_init_file(decoder, filename, write_callback, metadata_callback, error_callback, client_data) + + libflac.FLAC__stream_decoder_init_ogg_file.restype = FLAC__StreamDecoderInitStatus + libflac.FLAC__stream_decoder_init_ogg_file.argtypes = [POINTER(FLAC__StreamDecoder), + c_char_p, + FLAC__StreamDecoderWriteCallback, + FLAC__StreamDecoderMetadataCallback, + FLAC__StreamDecoderErrorCallback, + c_void_p] + + def FLAC__stream_decoder_init_ogg_file(decoder, filename, write_callback, metadata_callback, error_callback, client_data): + return 
libflac.FLAC__stream_decoder_init_ogg_file(decoder, filename, write_callback, metadata_callback, error_callback, client_data) + + libflac.FLAC__stream_decoder_finish.restype = FLAC__bool + libflac.FLAC__stream_decoder_finish.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_finish(decoder): + return libflac.FLAC__stream_decoder_finish(decoder) + + libflac.FLAC__stream_decoder_flush.restype = FLAC__bool + libflac.FLAC__stream_decoder_flush.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_flush(decoder): + return libflac.FLAC__stream_decoder_flush(decoder) + + libflac.FLAC__stream_decoder_reset.restype = FLAC__bool + libflac.FLAC__stream_decoder_reset.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_reset(decoder): + return libflac.FLAC__stream_decoder_reset(decoder) + + libflac.FLAC__stream_decoder_process_single.restype = FLAC__bool + libflac.FLAC__stream_decoder_process_single.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_process_single(decoder): + return libflac.FLAC__stream_decoder_process_single(decoder) + + libflac.FLAC__stream_decoder_process_until_end_of_metadata.restype = FLAC__bool + libflac.FLAC__stream_decoder_process_until_end_of_metadata.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_process_until_end_of_metadata(decoder): + return libflac.FLAC__stream_decoder_process_until_end_of_metadata(decoder) + + libflac.FLAC__stream_decoder_process_until_end_of_stream.restype = FLAC__bool + libflac.FLAC__stream_decoder_process_until_end_of_stream.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_process_until_end_of_stream(decoder): + return libflac.FLAC__stream_decoder_process_until_end_of_stream(decoder) + + libflac.FLAC__stream_decoder_skip_single_frame.restype = FLAC__bool + libflac.FLAC__stream_decoder_skip_single_frame.argtypes = [POINTER(FLAC__StreamDecoder)] + + def FLAC__stream_decoder_skip_single_frame(decoder): + return libflac.FLAC__stream_decoder_skip_single_frame(decoder) + + libflac.FLAC__stream_decoder_seek_absolute.restype = FLAC__bool + libflac.FLAC__stream_decoder_seek_absolute.argtypes = [POINTER(FLAC__StreamDecoder), FLAC__uint64] + + def FLAC__stream_decoder_seek_absolute(decoder, sample): + return libflac.FLAC__stream_decoder_seek_absolute(decoder, sample) + + # /stream_decoder + + # stream_encoder + + FLAC__StreamEncoderState = c_int + + libflac.FLAC__StreamEncoderStateString.restype = c_char_p + libflac.FLAC__StreamEncoderStateString.argtypes = [] + + def FLAC__StreamEncoderStateString(): + return libflac.FLAC__StreamEncoderStateString() + + + FLAC__StreamEncoderInitStatus = c_int + + libflac.FLAC__StreamEncoderInitStatusString.restype = c_char_p + libflac.FLAC__StreamEncoderInitStatusString.argtypes = [] + + def FLAC__StreamEncoderInitStatusString(): + return libflac.FLAC__StreamEncoderInitStatusString() + + + FLAC__StreamEncoderReadStatus = c_int + + libflac.FLAC__StreamEncoderReadStatusString.restype = c_char_p + libflac.FLAC__StreamEncoderReadStatusString.argtypes = [] + + def FLAC__StreamEncoderReadStatusString(): + return libflac.FLAC__StreamEncoderReadStatusString() + + + FLAC__StreamEncoderWriteStatus = c_int + + libflac.FLAC__StreamEncoderWriteStatusString.restype = c_char_p + libflac.FLAC__StreamEncoderWriteStatusString.argtypes = [] + + def FLAC__StreamEncoderWriteStatusString(): + return libflac.FLAC__StreamEncoderWriteStatusString() + + + FLAC__StreamEncoderSeekStatus = c_int + + 
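Unlike the decoder section above, the encoder status types here are bound only as c_int, without Python-side name lists. For the decoder, the integer returned by FLAC__stream_decoder_get_state() indexes FLAC__StreamDecoderStateEnum, which is also why FlacFileStream.get_buffer() later in this patch compares the state against 4 (FLAC__STREAM_DECODER_END_OF_STREAM). A small sketch, assuming libFLAC is installed and the module is importable as sbapp.pyogg.flac:

from sbapp.pyogg import flac

decoder = flac.FLAC__stream_decoder_new()

# A freshly created decoder reports FLAC__STREAM_DECODER_UNINITIALIZED.
state = flac.FLAC__stream_decoder_get_state(decoder)
print(state, flac.FLAC__StreamDecoderStateEnum[state])

flac.FLAC__stream_decoder_delete(decoder)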
libflac.FLAC__StreamEncoderSeekStatusString.restype = c_char_p + libflac.FLAC__StreamEncoderSeekStatusString.argtypes = [] + + def FLAC__StreamEncoderSeekStatusString(): + return libflac.FLAC__StreamEncoderSeekStatusString() + + + FLAC__StreamEncoderTellStatus = c_int + + libflac.FLAC__StreamEncoderTellStatusString.restype = c_char_p + libflac.FLAC__StreamEncoderTellStatusString.argtypes = [] + + def FLAC__StreamEncoderTellStatusString(): + return libflac.FLAC__StreamEncoderTellStatusString() + + + class FLAC__StreamEncoderProtected(Structure): + _fields_ = [("dummy", c_int)] + + class FLAC__StreamEncoderPrivate(Structure): + _fields_ = [("dummy", c_int)] + + class FLAC__StreamEncoder(Structure): + _fields_ = [("protected_", POINTER(FLAC__StreamEncoderProtected)), + ("private_", POINTER(FLAC__StreamEncoderPrivate))] + + FLAC__StreamEncoderReadCallback = CFUNCTYPE(FLAC__StreamEncoderReadStatus, POINTER(FLAC__StreamEncoder), POINTER(FLAC__byte*0), c_size_t_p, c_void_p) + + FLAC__StreamEncoderWriteCallback = CFUNCTYPE(FLAC__StreamEncoderWriteStatus, POINTER(FLAC__StreamEncoder), POINTER(FLAC__byte*0), c_size_t, c_uint, c_uint, c_void_p) + + FLAC__StreamEncoderSeekCallback = CFUNCTYPE(FLAC__StreamEncoderSeekStatus, POINTER(FLAC__StreamEncoder), FLAC__uint64, c_void_p) + + FLAC__StreamEncoderTellCallback = CFUNCTYPE(FLAC__StreamEncoderTellStatus, POINTER(FLAC__StreamEncoder), FLAC__uint64_p, c_void_p) + + FLAC__StreamEncoderMetadataCallback = CFUNCTYPE(None, POINTER(FLAC__StreamEncoder), POINTER(FLAC__StreamMetadata), c_void_p) + + FLAC__StreamEncoderProgressCallback = CFUNCTYPE(None, POINTER(FLAC__StreamEncoder), FLAC__uint64,FLAC__uint64, c_uint, c_uint, c_void_p) + + + libflac.FLAC__stream_encoder_new.restype = POINTER(FLAC__StreamEncoder) + libflac.FLAC__stream_encoder_new.argtypes = [] + + def FLAC__stream_encoder_new(): + return libflac.FLAC__stream_encoder_new() + + libflac.FLAC__stream_encoder_delete.restype = None + libflac.FLAC__stream_encoder_delete.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_delete(encoder): + return libflac.FLAC__stream_encoder_delete(encoder) + + + libflac.FLAC__stream_encoder_set_ogg_serial_number.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_ogg_serial_number.argtypes = [POINTER(FLAC__StreamEncoder), c_long] + + def FLAC__stream_encoder_set_ogg_serial_number(encoder, serial_number): + return libflac.FLAC__stream_encoder_set_ogg_serial_number(encoder, serial_number) + + libflac.FLAC__stream_encoder_set_verify.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_verify.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_verify(encoder, value): + return libflac.FLAC__stream_encoder_set_verify(encoder, value) + + libflac.FLAC__stream_encoder_set_streamable_subset.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_streamable_subset.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_streamable_subset(encoder, value): + return libflac.FLAC__stream_encoder_set_streamable_subset(encoder, value) + + libflac.FLAC__stream_encoder_set_channels.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_channels.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def FLAC__stream_encoder_set_channels(encoder, value): + return libflac.FLAC__stream_encoder_set_channels(encoder, value) + + libflac.FLAC__stream_encoder_set_bits_per_sample.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_bits_per_sample.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def 
FLAC__stream_encoder_set_bits_per_sample(encoder, value): + return libflac.FLAC__stream_encoder_set_bits_per_sample(encoder, value) + + libflac.FLAC__stream_encoder_set_sample_rate.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_sample_rate.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def FLAC__stream_encoder_set_sample_rate(encoder, value): + return libflac.FLAC__stream_encoder_set_sample_rate(encoder, value) + + libflac.FLAC__stream_encoder_set_compression_level.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_compression_level.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def FLAC__stream_encoder_set_compression_level(encoder, value): + return libflac.FLAC__stream_encoder_set_compression_level(encoder, value) + + libflac.FLAC__stream_encoder_set_blocksize.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_blocksize.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def FLAC__stream_encoder_set_blocksize(encoder, value): + return libflac.FLAC__stream_encoder_set_blocksize(encoder, value) + + libflac.FLAC__stream_encoder_set_do_mid_side_stereo.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_do_mid_side_stereo.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_do_mid_side_stereo(encoder, value): + return libflac.FLAC__stream_encoder_set_do_mid_side_stereo(encoder, value) + + libflac.FLAC__stream_encoder_set_loose_mid_side_stereo.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_loose_mid_side_stereo.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_loose_mid_side_stereo(encoder, value): + return libflac.FLAC__stream_encoder_set_loose_mid_side_stereo(encoder, value) + + libflac.FLAC__stream_encoder_set_apodization.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_apodization.argtypes = [POINTER(FLAC__StreamEncoder), c_char_p] + + def FLAC__stream_encoder_set_apodization(encoder, specification): + return libflac.FLAC__stream_encoder_set_apodization(encoder, specification) + + libflac.FLAC__stream_encoder_set_max_lpc_order.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_max_lpc_order.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def FLAC__stream_encoder_set_max_lpc_order(encoder, value): + return libflac.FLAC__stream_encoder_set_max_lpc_order(encoder, value) + + libflac.FLAC__stream_encoder_set_qlp_coeff_precision.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_qlp_coeff_precision.argtypes = [POINTER(FLAC__StreamEncoder), c_uint] + + def FLAC__stream_encoder_set_qlp_coeff_precision(encoder, value): + return libflac.FLAC__stream_encoder_set_qlp_coeff_precision(encoder, value) + + libflac.FLAC__stream_encoder_set_do_qlp_coeff_prec_search.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_do_qlp_coeff_prec_search.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_do_qlp_coeff_prec_search(encoder, value): + return libflac.FLAC__stream_encoder_set_do_qlp_coeff_prec_search(encoder, value) + + libflac.FLAC__stream_encoder_set_do_escape_coding.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_do_escape_coding.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_do_escape_coding(encoder, value): + return libflac.FLAC__stream_encoder_set_do_escape_coding(encoder, value) + + libflac.FLAC__stream_encoder_set_do_exhaustive_model_search.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_do_exhaustive_model_search.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + 
+ def FLAC__stream_encoder_set_do_exhaustive_model_search(encoder, value): + return libflac.FLAC__stream_encoder_set_do_exhaustive_model_search(encoder, value) + + libflac.FLAC__stream_encoder_set_min_residual_partition_order.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_min_residual_partition_order.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_min_residual_partition_order(encoder, value): + return libflac.FLAC__stream_encoder_set_min_residual_partition_order(encoder, value) + + libflac.FLAC__stream_encoder_set_max_residual_partition_order.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_max_residual_partition_order.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_max_residual_partition_order(encoder, value): + return libflac.FLAC__stream_encoder_set_max_residual_partition_order(encoder, value) + + libflac.FLAC__stream_encoder_set_rice_parameter_search_dist.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_rice_parameter_search_dist.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__bool] + + def FLAC__stream_encoder_set_rice_parameter_search_dist(encoder, value): + return libflac.FLAC__stream_encoder_set_rice_parameter_search_dist(encoder, value) + + libflac.FLAC__stream_encoder_set_total_samples_estimate.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_total_samples_estimate.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__uint64] + + def FLAC__stream_encoder_set_total_samples_estimate(encoder, value): + return libflac.FLAC__stream_encoder_set_total_samples_estimate(encoder, value) + + libflac.FLAC__stream_encoder_set_metadata.restype = FLAC__bool + libflac.FLAC__stream_encoder_set_metadata.argtypes = [POINTER(FLAC__StreamEncoder), POINTER(POINTER(FLAC__StreamMetadata)), c_uint] + + def FLAC__stream_encoder_set_metadata(encoder, metadata, num_blocks): + return libflac.FLAC__stream_encoder_set_metadata(encoder, metadata, num_blocks) + + libflac.FLAC__stream_encoder_get_state.restype = FLAC__StreamEncoderState + libflac.FLAC__stream_encoder_get_state.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_state(encoder): + return libflac.FLAC__stream_encoder_get_state(encoder) + + libflac.FLAC__stream_encoder_get_verify_decoder_state.restype = FLAC__StreamEncoderState + libflac.FLAC__stream_encoder_get_verify_decoder_state.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_verify_decoder_state(encoder): + return libflac.FLAC__stream_encoder_get_verify_decoder_state(encoder) + + libflac.FLAC__stream_encoder_get_resolved_state_string.restype = c_char_p + libflac.FLAC__stream_encoder_get_resolved_state_string.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_resolved_state_string(encoder): + return libflac.FLAC__stream_encoder_get_resolved_state_string(encoder) + + libflac.FLAC__stream_encoder_get_verify_decoder_error_stats.restype = None + libflac.FLAC__stream_encoder_get_verify_decoder_error_stats.argtypes = [POINTER(FLAC__StreamEncoder), FLAC__uint64_p, c_uint_p, c_uint_p, c_uint_p, FLAC__int32_p, FLAC__int32_p] + + def FLAC__stream_encoder_get_verify_decoder_error_stats(encoder, absolute_sample, frame_number, channel, sample, expected, got): + return libflac.FLAC__stream_encoder_get_verify_decoder_error_stats(encoder, absolute_sample, frame_number, channel, sample, expected, got) + + libflac.FLAC__stream_encoder_get_verify.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_verify.argtypes = 
[POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_verify(encoder): + return libflac.FLAC__stream_encoder_get_verify(encoder) + + libflac.FLAC__stream_encoder_get_streamable_subset.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_streamable_subset.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_streamable_subset(encoder): + return libflac.FLAC__stream_encoder_get_streamable_subset(encoder) + + libflac.FLAC__stream_encoder_get_channels.restype = c_uint + libflac.FLAC__stream_encoder_get_channels.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_channels(encoder): + return libflac.FLAC__stream_encoder_get_channels(encoder) + + libflac.FLAC__stream_encoder_get_bits_per_sample.restype = c_uint + libflac.FLAC__stream_encoder_get_bits_per_sample.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_bits_per_sample(encoder): + return libflac.FLAC__stream_encoder_get_bits_per_sample(encoder) + + libflac.FLAC__stream_encoder_get_sample_rate.restype = c_uint + libflac.FLAC__stream_encoder_get_sample_rate.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_sample_rate(encoder): + return libflac.FLAC__stream_encoder_get_sample_rate(encoder) + + libflac.FLAC__stream_encoder_get_blocksize.restype = c_uint + libflac.FLAC__stream_encoder_get_blocksize.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_blocksize(encoder): + return libflac.FLAC__stream_encoder_get_blocksize(encoder) + + libflac.FLAC__stream_encoder_get_do_mid_side_stereo.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_do_mid_side_stereo.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_do_mid_side_stereo(encoder): + return libflac.FLAC__stream_encoder_get_do_mid_side_stereo(encoder) + + libflac.FLAC__stream_encoder_get_loose_mid_side_stereo.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_loose_mid_side_stereo.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_loose_mid_side_stereo(encoder): + return libflac.FLAC__stream_encoder_get_loose_mid_side_stereo(encoder) + + libflac.FLAC__stream_encoder_get_max_lpc_order.restype = c_uint + libflac.FLAC__stream_encoder_get_max_lpc_order.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_max_lpc_order(encoder): + return libflac.FLAC__stream_encoder_get_max_lpc_order(encoder) + + libflac.FLAC__stream_encoder_get_qlp_coeff_precision.restype = c_uint + libflac.FLAC__stream_encoder_get_qlp_coeff_precision.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_qlp_coeff_precision(encoder): + return libflac.FLAC__stream_encoder_get_qlp_coeff_precision(encoder) + + libflac.FLAC__stream_encoder_get_do_qlp_coeff_prec_search.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_do_qlp_coeff_prec_search.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_do_qlp_coeff_prec_search(encoder): + return libflac.FLAC__stream_encoder_get_do_qlp_coeff_prec_search(encoder) + + libflac.FLAC__stream_encoder_get_do_escape_coding.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_do_escape_coding.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_do_escape_coding(encoder): + return libflac.FLAC__stream_encoder_get_do_escape_coding(encoder) + + libflac.FLAC__stream_encoder_get_do_exhaustive_model_search.restype = FLAC__bool + libflac.FLAC__stream_encoder_get_do_exhaustive_model_search.argtypes = [POINTER(FLAC__StreamEncoder)] + + 
def FLAC__stream_encoder_get_do_exhaustive_model_search(encoder): + return libflac.FLAC__stream_encoder_get_do_exhaustive_model_search(encoder) + + libflac.FLAC__stream_encoder_get_min_residual_partition_order.restype = c_uint + libflac.FLAC__stream_encoder_get_min_residual_partition_order.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_min_residual_partition_order(encoder): + return libflac.FLAC__stream_encoder_get_min_residual_partition_order(encoder) + + libflac.FLAC__stream_encoder_get_max_residual_partition_order.restype = c_uint + libflac.FLAC__stream_encoder_get_max_residual_partition_order.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_max_residual_partition_order(encoder): + return libflac.FLAC__stream_encoder_get_max_residual_partition_order(encoder) + + libflac.FLAC__stream_encoder_get_rice_parameter_search_dist.restype = c_uint + libflac.FLAC__stream_encoder_get_rice_parameter_search_dist.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_rice_parameter_search_dist(encoder): + return libflac.FLAC__stream_encoder_get_rice_parameter_search_dist(encoder) + + libflac.FLAC__stream_encoder_get_total_samples_estimate.restype = FLAC__uint64 + libflac.FLAC__stream_encoder_get_total_samples_estimate.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_get_total_samples_estimate(encoder): + return libflac.FLAC__stream_encoder_get_total_samples_estimate(encoder) + + libflac.FLAC__stream_encoder_init_stream.restype = FLAC__StreamEncoderInitStatus + libflac.FLAC__stream_encoder_init_stream.argtypes = [POINTER(FLAC__StreamEncoder), + FLAC__StreamEncoderWriteCallback, + FLAC__StreamEncoderSeekCallback, + FLAC__StreamEncoderTellCallback, + FLAC__StreamEncoderMetadataCallback, + c_void_p] + + def FLAC__stream_encoder_init_stream(encoder, write_callback, seek_callback, tell_callback, metadata_callback,client_data): + return libflac.FLAC__stream_encoder_init_stream(encoder, write_callback, seek_callback, tell_callback, metadata_callback,client_data) + + libflac.FLAC__stream_encoder_init_ogg_stream.restype = FLAC__StreamEncoderInitStatus + libflac.FLAC__stream_encoder_init_ogg_stream.argtypes = [POINTER(FLAC__StreamEncoder), + FLAC__StreamEncoderReadCallback, + FLAC__StreamEncoderWriteCallback, + FLAC__StreamEncoderSeekCallback, + FLAC__StreamEncoderTellCallback, + FLAC__StreamEncoderMetadataCallback, + c_void_p] + + def FLAC__stream_encoder_init_ogg_stream(encoder, read_callback, write_callback, seek_callback, tell_callback, metadata_callback,client_data): + return libflac.FLAC__stream_encoder_init_ogg_stream(encoder, read_callback, write_callback, seek_callback, tell_callback, metadata_callback,client_data) + + libflac.FLAC__stream_encoder_init_file.restype = FLAC__StreamEncoderInitStatus + libflac.FLAC__stream_encoder_init_file.argtypes = [POINTER(FLAC__StreamEncoder), + c_char_p, + FLAC__StreamEncoderProgressCallback, + c_void_p] + + def FLAC__stream_encoder_init_file(encoder, filename, progress_callback,client_data): + return libflac.FLAC__stream_encoder_init_file(encoder, filename, progress_callback,client_data) + + + libflac.FLAC__stream_encoder_init_ogg_file.restype = FLAC__StreamEncoderInitStatus + libflac.FLAC__stream_encoder_init_ogg_file.argtypes = [POINTER(FLAC__StreamEncoder), + c_char_p, + FLAC__StreamEncoderProgressCallback, + c_void_p] + + def FLAC__stream_encoder_init_ogg_file(encoder, filename, progress_callback,client_data): + return 
libflac.FLAC__stream_encoder_init_ogg_file(encoder, filename, progress_callback,client_data) + + libflac.FLAC__stream_encoder_finish.restype = FLAC__bool + libflac.FLAC__stream_encoder_finish.argtypes = [POINTER(FLAC__StreamEncoder)] + + def FLAC__stream_encoder_finish(encoder): + return libflac.FLAC__stream_encoder_finish(encoder) + + libflac.FLAC__stream_encoder_process.restype = FLAC__bool + libflac.FLAC__stream_encoder_process.argtypes = [POINTER(FLAC__StreamEncoder), POINTER(FLAC__int32_p*0), c_uint] + + def FLAC__stream_encoder_process(encoder, buffer, samples): + return libflac.FLAC__stream_encoder_process(encoder, buffer, samples) + + libflac.FLAC__stream_encoder_process_interleaved.restype = FLAC__bool + libflac.FLAC__stream_encoder_process_interleaved.argtypes = [POINTER(FLAC__StreamEncoder), POINTER(FLAC__int32*0), c_uint] + + def FLAC__stream_encoder_process_interleaved(encoder, buffer, samples): + return libflac.FLAC__stream_encoder_process_interleaved(encoder, buffer, samples) + + # /stream_encoder diff --git a/sbapp/pyogg/flac_file.py b/sbapp/pyogg/flac_file.py new file mode 100644 index 0000000..7e97ca7 --- /dev/null +++ b/sbapp/pyogg/flac_file.py @@ -0,0 +1,114 @@ +import ctypes +from itertools import chain + +from . import flac +from .audio_file import AudioFile +from .pyogg_error import PyOggError + +def _to_char_p(string): + try: + return ctypes.c_char_p(string.encode("utf-8")) + except: + return ctypes.c_char_p(string) + +def _resize_array(array, new_size): + return (array._type_*new_size).from_address(ctypes.addressof(array)) + + +class FlacFile(AudioFile): + def write_callback(self, decoder, frame, buffer, client_data): + multi_channel_buf = _resize_array(buffer.contents, self.channels) + arr_size = frame.contents.header.blocksize + if frame.contents.header.channels >= 2: + arrays = [] + for i in range(frame.contents.header.channels): + arr = ctypes.cast(multi_channel_buf[i], ctypes.POINTER(flac.FLAC__int32*arr_size)).contents + arrays.append(arr[:]) + + arr = list(chain.from_iterable(zip(*arrays))) + + self.buffer[self.buffer_pos : self.buffer_pos + len(arr)] = arr[:] + self.buffer_pos += len(arr) + + else: + arr = ctypes.cast(multi_channel_buf[0], ctypes.POINTER(flac.FLAC__int32*arr_size)).contents + self.buffer[self.buffer_pos : self.buffer_pos + arr_size] = arr[:] + self.buffer_pos += arr_size + return 0 + + def metadata_callback(self,decoder, metadata, client_data): + if not self.buffer: + self.total_samples = metadata.contents.data.stream_info.total_samples + self.channels = metadata.contents.data.stream_info.channels + Buffer = flac.FLAC__int16*(self.total_samples * self.channels) + self.buffer = Buffer() + self.frequency = metadata.contents.data.stream_info.sample_rate + + def error_callback(self,decoder, status, client_data): + raise PyOggError("An error occured during the process of decoding. Status enum: {}".format(flac.FLAC__StreamDecoderErrorStatusEnum[status])) + + def __init__(self, path): + self.decoder = flac.FLAC__stream_decoder_new() + + self.client_data = ctypes.c_void_p() + + #: Number of channels in audio file. + self.channels = None + + #: Number of samples per second (per channel). For + # example, 44100. + self.frequency = None + + self.total_samples = None + + #: Raw PCM data from audio file. 
+ self.buffer = None + + self.buffer_pos = 0 + + write_callback_ = flac.FLAC__StreamDecoderWriteCallback(self.write_callback) + + metadata_callback_ = flac.FLAC__StreamDecoderMetadataCallback(self.metadata_callback) + + error_callback_ = flac.FLAC__StreamDecoderErrorCallback(self.error_callback) + + init_status = flac.FLAC__stream_decoder_init_file( + self.decoder, + _to_char_p(path), # This will have an issue with Unicode filenames + write_callback_, + metadata_callback_, + error_callback_, + self.client_data + ) + + if init_status: # error + error = flac.FLAC__StreamDecoderInitStatusEnum[init_status] + raise PyOggError( + "An error occured when trying to open '{}': {}".format(path, error) + ) + + metadata_status = (flac.FLAC__stream_decoder_process_until_end_of_metadata(self.decoder)) + if not metadata_status: # error + raise PyOggError("An error occured when trying to decode the metadata of {}".format(path)) + + stream_status = (flac.FLAC__stream_decoder_process_until_end_of_stream(self.decoder)) + if not stream_status: # error + raise PyOggError("An error occured when trying to decode the audio stream of {}".format(path)) + + flac.FLAC__stream_decoder_finish(self.decoder) + + #: Length of buffer + self.buffer_length = len(self.buffer) + + self.bytes_per_sample = ctypes.sizeof(flac.FLAC__int16) # See definition of Buffer in metadata_callback() + + # Cast buffer to one-dimensional array of chars + CharBuffer = ( + ctypes.c_byte * + (self.bytes_per_sample * len(self.buffer)) + ) + self.buffer = CharBuffer.from_buffer(self.buffer) + + # FLAC audio is always signed. See + # https://xiph.org/flac/api/group__flac__stream__decoder.html#gaf98a4f9e2cac5747da6018c3dfc8dde1 + self.signed = True diff --git a/sbapp/pyogg/flac_file_stream.py b/sbapp/pyogg/flac_file_stream.py new file mode 100644 index 0000000..f832c31 --- /dev/null +++ b/sbapp/pyogg/flac_file_stream.py @@ -0,0 +1,141 @@ +import ctypes +from itertools import chain + +from . import flac +from .pyogg_error import PyOggError + +def _to_char_p(string): + try: + return ctypes.c_char_p(string.encode("utf-8")) + except: + return ctypes.c_char_p(string) + +def _resize_array(array, new_size): + return (array._type_*new_size).from_address(ctypes.addressof(array)) + + +class FlacFileStream: + def write_callback(self,decoder, frame, buffer, client_data): + multi_channel_buf = _resize_array(buffer.contents, self.channels) + arr_size = frame.contents.header.blocksize + if frame.contents.header.channels >= 2: + arrays = [] + for i in range(frame.contents.header.channels): + arr = ctypes.cast(multi_channel_buf[i], ctypes.POINTER(flac.FLAC__int32*arr_size)).contents + arrays.append(arr[:]) + + arr = list(chain.from_iterable(zip(*arrays))) + + self.buffer = (flac.FLAC__int16*len(arr))(*arr) + self.bytes_written = len(arr) * 2 + + else: + arr = ctypes.cast(multi_channel_buf[0], ctypes.POINTER(flac.FLAC__int32*arr_size)).contents + self.buffer = (flac.FLAC__int16*len(arr))(*arr[:]) + self.bytes_written = arr_size * 2 + return 0 + + def metadata_callback(self,decoder, metadata, client_data): + self.total_samples = metadata.contents.data.stream_info.total_samples + self.channels = metadata.contents.data.stream_info.channels + self.frequency = metadata.contents.data.stream_info.sample_rate + + def error_callback(self,decoder, status, client_data): + raise PyOggError("An error occured during the process of decoding. 
Status enum: {}".format(flac.FLAC__StreamDecoderErrorStatusEnum[status])) + + def __init__(self, path): + self.decoder = flac.FLAC__stream_decoder_new() + + self.client_data = ctypes.c_void_p() + + #: Number of channels in audio file. + self.channels = None + + #: Number of samples per second (per channel). For + # example, 44100. + self.frequency = None + + self.total_samples = None + + self.buffer = None + + self.bytes_written = None + + self.write_callback_ = flac.FLAC__StreamDecoderWriteCallback(self.write_callback) + + self.metadata_callback_ = flac.FLAC__StreamDecoderMetadataCallback(self.metadata_callback) + + self.error_callback_ = flac.FLAC__StreamDecoderErrorCallback(self.error_callback) + + init_status = flac.FLAC__stream_decoder_init_file(self.decoder, + _to_char_p(path), + self.write_callback_, + self.metadata_callback_, + self.error_callback_, + self.client_data) + + if init_status: # error + raise PyOggError("An error occured when trying to open '{}': {}".format(path, flac.FLAC__StreamDecoderInitStatusEnum[init_status])) + + metadata_status = (flac.FLAC__stream_decoder_process_until_end_of_metadata(self.decoder)) + if not metadata_status: # error + raise PyOggError("An error occured when trying to decode the metadata of {}".format(path)) + + #: Bytes per sample + self.bytes_per_sample = 2 + + def get_buffer(self): + """Returns the buffer. + + Returns buffer (a bytes object) or None if all data has + been read from the file. + + """ + # Attempt to read a single frame of audio + stream_status = (flac.FLAC__stream_decoder_process_single(self.decoder)) + if not stream_status: # error + raise PyOggError("An error occured when trying to decode the audio stream of {}".format(path)) + + # Check if we encountered the end of the stream + if (flac.FLAC__stream_decoder_get_state(self.decoder) == 4): # end of stream + return None + + buffer_as_bytes = bytes(self.buffer) + return buffer_as_bytes + + def clean_up(self): + flac.FLAC__stream_decoder_finish(self.decoder) + + def get_buffer_as_array(self): + """Provides the buffer as a NumPy array. + + Note that the underlying data type is 16-bit signed + integers. + + Does not copy the underlying data, so the returned array + should either be processed or copied before the next call + to get_buffer() or get_buffer_as_array(). 
+ + """ + import numpy # type: ignore + + # Read the next samples from the stream + buf = self.get_buffer() + + # Check if we've come to the end of the stream + if buf is None: + return None + + # Convert the bytes buffer to a NumPy array + array = numpy.frombuffer( + buf, + dtype=numpy.int16 + ) + + # Reshape the array + return array.reshape( + (len(buf) + // self.bytes_per_sample + // self.channels, + self.channels) + ) diff --git a/sbapp/pyogg/library_loader.py b/sbapp/pyogg/library_loader.py new file mode 100644 index 0000000..711b1ba --- /dev/null +++ b/sbapp/pyogg/library_loader.py @@ -0,0 +1,147 @@ +import ctypes +import ctypes.util +import os +import sys +import platform +from typing import ( + Optional, + Dict, + List +) + +_here = os.path.dirname(__file__) + +class ExternalLibraryError(Exception): + pass + +architecture = platform.architecture()[0] + +_windows_styles = ["{}", "lib{}", "lib{}_dynamic", "{}_dynamic"] + +_other_styles = ["{}", "lib{}"] + +if architecture == "32bit": + for arch_style in ["32bit", "32" "86", "win32", "x86", "_x86", "_32", "_win32", "_32bit"]: + for style in ["{}", "lib{}"]: + _windows_styles.append(style.format("{}"+arch_style)) + +elif architecture == "64bit": + for arch_style in ["64bit", "64" "86_64", "amd64", "win_amd64", "x86_64", "_x86_64", "_64", "_amd64", "_64bit"]: + for style in ["{}", "lib{}"]: + _windows_styles.append(style.format("{}"+arch_style)) + + +run_tests = lambda lib, tests: [f(lib) for f in tests] + +# Get the appropriate directory for the shared libraries depending +# on the current platform and architecture +platform_ = platform.system() +lib_dir = None +if platform_ == "Darwin": + lib_dir = "libs/macos" +elif platform_ == "Windows": + if architecture == "32bit": + lib_dir = "libs/win32" + elif architecture == "64bit": + lib_dir = "libs/win_amd64" + + +class Library: + @staticmethod + def load(names: Dict[str, str], paths: Optional[List[str]] = None, tests = []) -> Optional[ctypes.CDLL]: + lib = InternalLibrary.load(names, tests) + if lib is None: + lib = ExternalLibrary.load(names["external"], paths, tests) + return lib + + +class InternalLibrary: + @staticmethod + def load(names: Dict[str, str], tests) -> Optional[ctypes.CDLL]: + # If we do not have a library directory, give up immediately + if lib_dir is None: + return None + + # Get the appropriate library filename given the platform + try: + name = names[platform_] + except KeyError: + return None + + # Attempt to load the library from here + path = _here + "/" + lib_dir + "/" + name + try: + lib = ctypes.CDLL(path) + except OSError as e: + return None + + # Check that the library passes the tests + if tests and all(run_tests(lib, tests)): + return lib + + # Library failed tests + return None + +# Cache of libraries that have already been loaded +_loaded_libraries: Dict[str, ctypes.CDLL] = {} + +class ExternalLibrary: + @staticmethod + def load(name, paths = None, tests = []): + if name in _loaded_libraries: + return _loaded_libraries[name] + if sys.platform == "win32": + lib = ExternalLibrary.load_windows(name, paths, tests) + _loaded_libraries[name] = lib + return lib + else: + lib = ExternalLibrary.load_other(name, paths, tests) + _loaded_libraries[name] = lib + return lib + + @staticmethod + def load_other(name, paths = None, tests = []): + os.environ["PATH"] += ";" + ";".join((os.getcwd(), _here)) + if paths: os.environ["PATH"] += ";" + ";".join(paths) + + for style in _other_styles: + candidate = style.format(name) + library = ctypes.util.find_library(candidate) + if 
library: + try: + lib = ctypes.CDLL(library) + if tests and all(run_tests(lib, tests)): + return lib + except: + pass + + @staticmethod + def load_windows(name, paths = None, tests = []): + os.environ["PATH"] += ";" + ";".join((os.getcwd(), _here)) + if paths: os.environ["PATH"] += ";" + ";".join(paths) + + not_supported = [] # libraries that were found, but are not supported + for style in _windows_styles: + candidate = style.format(name) + library = ctypes.util.find_library(candidate) + if library: + try: + lib = ctypes.CDLL(library) + if tests and all(run_tests(lib, tests)): + return lib + not_supported.append(library) + except WindowsError: + pass + except OSError: + not_supported.append(library) + + + if not_supported: + raise ExternalLibraryError("library '{}' couldn't be loaded, because the following candidates were not supported:".format(name) + + ("\n{}" * len(not_supported)).format(*not_supported)) + + raise ExternalLibraryError("library '{}' couldn't be loaded".format(name)) + + + + diff --git a/sbapp/pyogg/ogg.py b/sbapp/pyogg/ogg.py new file mode 100644 index 0000000..08a944b --- /dev/null +++ b/sbapp/pyogg/ogg.py @@ -0,0 +1,672 @@ +############################################################ +# Ogg license: # +############################################################ +""" +Copyright (c) 2002, Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import ctypes +from ctypes import c_int, c_int8, c_int16, c_int32, c_int64, c_uint, c_uint8, c_uint16, c_uint32, c_uint64, c_float, c_long, c_ulong, c_char, c_char_p, c_ubyte, c_longlong, c_ulonglong, c_size_t, c_void_p, c_double, POINTER, pointer, cast +import ctypes.util +import sys +from traceback import print_exc as _print_exc +import os + +from .library_loader import Library, ExternalLibrary, ExternalLibraryError + + +def get_raw_libname(name): + name = os.path.splitext(name)[0].lower() + for x in "0123456789._- ":name=name.replace(x,"") + return name + +# Define a function to convert strings to char-pointers. In Python 3 +# all strings are Unicode, while in Python 2 they were ASCII-encoded. 
+# FIXME: Does PyOgg even support Python 2? +if sys.version_info.major > 2: + to_char_p = lambda s: s.encode('utf-8') +else: + to_char_p = lambda s: s + +__here = os.getcwd() + +libogg = None + +try: + names = { + "Windows": "ogg.dll", + "Darwin": "libogg.0.dylib", + "external": "ogg" + } + libogg = Library.load(names, tests = [lambda lib: hasattr(lib, "oggpack_writeinit")]) +except ExternalLibraryError: + pass +except: + _print_exc() + +if libogg is not None: + PYOGG_OGG_AVAIL = True +else: + PYOGG_OGG_AVAIL = False + +if PYOGG_OGG_AVAIL: + # Sanity check also satisfies mypy type checking + assert libogg is not None + + # ctypes + c_ubyte_p = POINTER(c_ubyte) + c_uchar = c_ubyte + c_uchar_p = c_ubyte_p + c_float_p = POINTER(c_float) + c_float_p_p = POINTER(c_float_p) + c_float_p_p_p = POINTER(c_float_p_p) + c_char_p_p = POINTER(c_char_p) + c_int_p = POINTER(c_int) + c_long_p = POINTER(c_long) + + # os_types + ogg_int16_t = c_int16 + ogg_uint16_t = c_uint16 + ogg_int32_t = c_int32 + ogg_uint32_t = c_uint32 + ogg_int64_t = c_int64 + ogg_uint64_t = c_uint64 + ogg_int64_t_p = POINTER(ogg_int64_t) + + # ogg + class ogg_iovec_t(ctypes.Structure): + """ + Wrapper for: + typedef struct ogg_iovec_t; + """ + _fields_ = [("iov_base", c_void_p), + ("iov_len", c_size_t)] + + class oggpack_buffer(ctypes.Structure): + """ + Wrapper for: + typedef struct oggpack_buffer; + """ + _fields_ = [("endbyte", c_long), + ("endbit", c_int), + ("buffer", c_uchar_p), + ("ptr", c_uchar_p), + ("storage", c_long)] + + class ogg_page(ctypes.Structure): + """ + Wrapper for: + typedef struct ogg_page; + """ + _fields_ = [("header", c_uchar_p), + ("header_len", c_long), + ("body", c_uchar_p), + ("body_len", c_long)] + + class ogg_stream_state(ctypes.Structure): + """ + Wrapper for: + typedef struct ogg_stream_state; + """ + _fields_ = [("body_data", c_uchar_p), + ("body_storage", c_long), + ("body_fill", c_long), + ("body_returned", c_long), + + ("lacing_vals", c_int), + ("granule_vals", ogg_int64_t), + + ("lacing_storage", c_long), + ("lacing_fill", c_long), + ("lacing_packet", c_long), + ("lacing_returned", c_long), + + ("header", c_uchar*282), + ("header_fill", c_int), + + ("e_o_s", c_int), + ("b_o_s", c_int), + + ("serialno", c_long), + ("pageno", c_long), + ("packetno", ogg_int64_t), + ("granulepos", ogg_int64_t)] + + class ogg_packet(ctypes.Structure): + """ + Wrapper for: + typedef struct ogg_packet; + """ + _fields_ = [("packet", c_uchar_p), + ("bytes", c_long), + ("b_o_s", c_long), + ("e_o_s", c_long), + + ("granulepos", ogg_int64_t), + + ("packetno", ogg_int64_t)] + + def __str__(self): + bos = "" + if self.b_o_s: + bos = "beginning of stream, " + eos = "" + if self.e_o_s: + eos = "end of stream, " + + # Converting the data will cause a seg-fault if the memory isn't valid + data = bytes(self.packet[0:self.bytes]) + value = ( + f"Ogg Packet <{hex(id(self))}>: " + + f"number {self.packetno}, " + + f"granule position {self.granulepos}, " + + bos + eos + + f"{self.bytes} bytes" + ) + return value + + class ogg_sync_state(ctypes.Structure): + """ + Wrapper for: + typedef struct ogg_sync_state; + """ + _fields_ = [("data", c_uchar_p), + ("storage", c_int), + ("fill", c_int), + ("returned", c_int), + + ("unsynched", c_int), + ("headerbytes", c_int), + ("bodybytes", c_int)] + + b_p = POINTER(oggpack_buffer) + oy_p = POINTER(ogg_sync_state) + op_p = POINTER(ogg_packet) + og_p = POINTER(ogg_page) + os_p = POINTER(ogg_stream_state) + iov_p = POINTER(ogg_iovec_t) + + libogg.oggpack_writeinit.restype = None + 
libogg.oggpack_writeinit.argtypes = [b_p] + + def oggpack_writeinit(b): + libogg.oggpack_writeinit(b) + + try: + libogg.oggpack_writecheck.restype = c_int + libogg.oggpack_writecheck.argtypes = [b_p] + def oggpack_writecheck(b): + libogg.oggpack_writecheck(b) + except: + pass + + libogg.oggpack_writetrunc.restype = None + libogg.oggpack_writetrunc.argtypes = [b_p, c_long] + + def oggpack_writetrunc(b, bits): + libogg.oggpack_writetrunc(b, bits) + + libogg.oggpack_writealign.restype = None + libogg.oggpack_writealign.argtypes = [b_p] + + def oggpack_writealign(b): + libogg.oggpack_writealign(b) + + libogg.oggpack_writecopy.restype = None + libogg.oggpack_writecopy.argtypes = [b_p, c_void_p, c_long] + + def oggpack_writecopy(b, source, bits): + libogg.oggpack_writecopy(b, source, bits) + + libogg.oggpack_reset.restype = None + libogg.oggpack_reset.argtypes = [b_p] + + def oggpack_reset(b): + libogg.oggpack_reset(b) + + libogg.oggpack_writeclear.restype = None + libogg.oggpack_writeclear.argtypes = [b_p] + + def oggpack_writeclear(b): + libogg.oggpack_writeclear(b) + + libogg.oggpack_readinit.restype = None + libogg.oggpack_readinit.argtypes = [b_p, c_uchar_p, c_int] + + def oggpack_readinit(b, buf, bytes): + libogg.oggpack_readinit(b, buf, bytes) + + libogg.oggpack_write.restype = None + libogg.oggpack_write.argtypes = [b_p, c_ulong, c_int] + + def oggpack_write(b, value, bits): + libogg.oggpack_write(b, value, bits) + + libogg.oggpack_look.restype = c_long + libogg.oggpack_look.argtypes = [b_p, c_int] + + def oggpack_look(b, bits): + return libogg.oggpack_look(b, bits) + + libogg.oggpack_look1.restype = c_long + libogg.oggpack_look1.argtypes = [b_p] + + def oggpack_look1(b): + return libogg.oggpack_look1(b) + + libogg.oggpack_adv.restype = None + libogg.oggpack_adv.argtypes = [b_p, c_int] + + def oggpack_adv(b, bits): + libogg.oggpack_adv(b, bits) + + libogg.oggpack_adv1.restype = None + libogg.oggpack_adv1.argtypes = [b_p] + + def oggpack_adv1(b): + libogg.oggpack_adv1(b) + + libogg.oggpack_read.restype = c_long + libogg.oggpack_read.argtypes = [b_p, c_int] + + def oggpack_read(b, bits): + return libogg.oggpack_read(b, bits) + + libogg.oggpack_read1.restype = c_long + libogg.oggpack_read1.argtypes = [b_p] + + def oggpack_read1(b): + return libogg.oggpack_read1(b) + + libogg.oggpack_bytes.restype = c_long + libogg.oggpack_bytes.argtypes = [b_p] + + def oggpack_bytes(b): + return libogg.oggpack_bytes(b) + + libogg.oggpack_bits.restype = c_long + libogg.oggpack_bits.argtypes = [b_p] + + def oggpack_bits(b): + return libogg.oggpack_bits(b) + + libogg.oggpack_get_buffer.restype = c_uchar_p + libogg.oggpack_get_buffer.argtypes = [b_p] + + def oggpack_get_buffer(b): + return libogg.oggpack_get_buffer(b) + + + + libogg.oggpackB_writeinit.restype = None + libogg.oggpackB_writeinit.argtypes = [b_p] + + def oggpackB_writeinit(b): + libogg.oggpackB_writeinit(b) + + try: + libogg.oggpackB_writecheck.restype = c_int + libogg.oggpackB_writecheck.argtypes = [b_p] + + def oggpackB_writecheck(b): + return libogg.oggpackB_writecheck(b) + except: + pass + + libogg.oggpackB_writetrunc.restype = None + libogg.oggpackB_writetrunc.argtypes = [b_p, c_long] + + def oggpackB_writetrunc(b, bits): + libogg.oggpackB_writetrunc(b, bits) + + libogg.oggpackB_writealign.restype = None + libogg.oggpackB_writealign.argtypes = [b_p] + + def oggpackB_writealign(b): + libogg.oggpackB_writealign(b) + + libogg.oggpackB_writecopy.restype = None + libogg.oggpackB_writecopy.argtypes = [b_p, c_void_p, c_long] + + def 
oggpackB_writecopy(b, source, bits): + libogg.oggpackB_writecopy(b, source, bits) + + libogg.oggpackB_reset.restype = None + libogg.oggpackB_reset.argtypes = [b_p] + + def oggpackB_reset(b): + libogg.oggpackB_reset(b) + + libogg.oggpackB_reset.restype = None + libogg.oggpackB_writeclear.argtypes = [b_p] + + def oggpackB_reset(b): + libogg.oggpackB_reset(b) + + libogg.oggpackB_readinit.restype = None + libogg.oggpackB_readinit.argtypes = [b_p, c_uchar_p, c_int] + + def oggpackB_readinit(b, buf, bytes): + libogg.oggpackB_readinit(b, buf, bytes) + + libogg.oggpackB_write.restype = None + libogg.oggpackB_write.argtypes = [b_p, c_ulong, c_int] + + def oggpackB_write(b, value, bits): + libogg.oggpackB_write(b, value, bits) + + libogg.oggpackB_look.restype = c_long + libogg.oggpackB_look.argtypes = [b_p, c_int] + + def oggpackB_look(b, bits): + return libogg.oggpackB_look(b, bits) + + libogg.oggpackB_look1.restype = c_long + libogg.oggpackB_look1.argtypes = [b_p] + + def oggpackB_look1(b): + return libogg.oggpackB_look1(b) + + libogg.oggpackB_adv.restype = None + libogg.oggpackB_adv.argtypes = [b_p, c_int] + + def oggpackB_adv(b, bits): + libogg.oggpackB_adv(b, bits) + + libogg.oggpackB_adv1.restype = None + libogg.oggpackB_adv1.argtypes = [b_p] + + def oggpackB_adv1(b): + libogg.oggpackB_adv1(b) + + libogg.oggpackB_read.restype = c_long + libogg.oggpackB_read.argtypes = [b_p, c_int] + + def oggpackB_read(b, bits): + return libogg.oggpackB_read(b, bits) + + libogg.oggpackB_read1.restype = c_long + libogg.oggpackB_read1.argtypes = [b_p] + + def oggpackB_read1(b): + return libogg.oggpackB_read1(b) + + libogg.oggpackB_bytes.restype = c_long + libogg.oggpackB_bytes.argtypes = [b_p] + + def oggpackB_bytes(b): + return libogg.oggpackB_bytes(b) + + libogg.oggpackB_bits.restype = c_long + libogg.oggpackB_bits.argtypes = [b_p] + + def oggpackB_bits(b): + return libogg.oggpackB_bits(b) + + libogg.oggpackB_get_buffer.restype = c_uchar_p + libogg.oggpackB_get_buffer.argtypes = [b_p] + + def oggpackB_get_buffer(b): + return libogg.oggpackB_get_buffer(b) + + + + libogg.ogg_stream_packetin.restype = c_int + libogg.ogg_stream_packetin.argtypes = [os_p, op_p] + + def ogg_stream_packetin(os, op): + return libogg.ogg_stream_packetin(os, op) + + try: + libogg.ogg_stream_iovecin.restype = c_int + libogg.ogg_stream_iovecin.argtypes = [os_p, iov_p, c_int, c_long, ogg_int64_t] + + def ogg_stream_iovecin(os, iov, count, e_o_s, granulepos): + return libogg.ogg_stream_iovecin(os, iov, count, e_o_s, granulepos) + except: + pass + + libogg.ogg_stream_pageout.restype = c_int + libogg.ogg_stream_pageout.argtypes = [os_p, og_p] + + def ogg_stream_pageout(os, og): + return libogg.ogg_stream_pageout(os, og) + + try: + libogg.ogg_stream_pageout_fill.restype = c_int + libogg.ogg_stream_pageout_fill.argtypes = [os_p, og_p, c_int] + def ogg_stream_pageout_fill(os, og, nfill): + return libogg.ogg_stream_pageout_fill(os, og, nfill) + except: + pass + + libogg.ogg_stream_flush.restype = c_int + libogg.ogg_stream_flush.argtypes = [os_p, og_p] + + def ogg_stream_flush(os, og): + return libogg.ogg_stream_flush(os, og) + + try: + libogg.ogg_stream_flush_fill.restype = c_int + libogg.ogg_stream_flush_fill.argtypes = [os_p, og_p, c_int] + def ogg_stream_flush_fill(os, og, nfill): + return libogg.ogg_stream_flush_fill(os, og, nfill) + except: + pass + + + + libogg.ogg_sync_init.restype = c_int + libogg.ogg_sync_init.argtypes = [oy_p] + + def ogg_sync_init(oy): + return libogg.ogg_sync_init(oy) + + libogg.ogg_sync_clear.restype = c_int + 
libogg.ogg_sync_clear.argtypes = [oy_p] + + def ogg_sync_clear(oy): + return libogg.ogg_sync_clear(oy) + + libogg.ogg_sync_reset.restype = c_int + libogg.ogg_sync_reset.argtypes = [oy_p] + + def ogg_sync_reset(oy): + return libogg.ogg_sync_reset(oy) + + libogg.ogg_sync_destroy.restype = c_int + libogg.ogg_sync_destroy.argtypes = [oy_p] + + def ogg_sync_destroy(oy): + return libogg.ogg_sync_destroy(oy) + + try: + libogg.ogg_sync_check.restype = c_int + libogg.ogg_sync_check.argtypes = [oy_p] + def ogg_sync_check(oy): + return libogg.ogg_sync_check(oy) + except: + pass + + + + libogg.ogg_sync_buffer.restype = c_char_p + libogg.ogg_sync_buffer.argtypes = [oy_p, c_long] + + def ogg_sync_buffer(oy, size): + return libogg.ogg_sync_buffer(oy, size) + + libogg.ogg_sync_wrote.restype = c_int + libogg.ogg_sync_wrote.argtypes = [oy_p, c_long] + + def ogg_sync_wrote(oy, bytes): + return libogg.ogg_sync_wrote(oy, bytes) + + libogg.ogg_sync_pageseek.restype = c_int + libogg.ogg_sync_pageseek.argtypes = [oy_p, og_p] + + def ogg_sync_pageseek(oy, og): + return libogg.ogg_sync_pageseek(oy, og) + + libogg.ogg_sync_pageout.restype = c_long + libogg.ogg_sync_pageout.argtypes = [oy_p, og_p] + + def ogg_sync_pageout(oy, og): + return libogg.ogg_sync_pageout(oy, og) + + libogg.ogg_stream_pagein.restype = c_int + libogg.ogg_stream_pagein.argtypes = [os_p, og_p] + + def ogg_stream_pagein(os, og): + return libogg.ogg_stream_pagein(oy, og) + + libogg.ogg_stream_packetout.restype = c_int + libogg.ogg_stream_packetout.argtypes = [os_p, op_p] + + def ogg_stream_packetout(os, op): + return libogg.ogg_stream_packetout(oy, op) + + libogg.ogg_stream_packetpeek.restype = c_int + libogg.ogg_stream_packetpeek.argtypes = [os_p, op_p] + + def ogg_stream_packetpeek(os, op): + return libogg.ogg_stream_packetpeek(os, op) + + + + libogg.ogg_stream_init.restype = c_int + libogg.ogg_stream_init.argtypes = [os_p, c_int] + + def ogg_stream_init(os, serialno): + return libogg.ogg_stream_init(os, serialno) + + libogg.ogg_stream_clear.restype = c_int + libogg.ogg_stream_clear.argtypes = [os_p] + + def ogg_stream_clear(os): + return libogg.ogg_stream_clear(os) + + libogg.ogg_stream_reset.restype = c_int + libogg.ogg_stream_reset.argtypes = [os_p] + + def ogg_stream_reset(os): + return libogg.ogg_stream_reset(os) + + libogg.ogg_stream_reset_serialno.restype = c_int + libogg.ogg_stream_reset_serialno.argtypes = [os_p, c_int] + + def ogg_stream_reset_serialno(os, serialno): + return libogg.ogg_stream_reset_serialno(os, serialno) + + libogg.ogg_stream_destroy.restype = c_int + libogg.ogg_stream_destroy.argtypes = [os_p] + + def ogg_stream_destroy(os): + return libogg.ogg_stream_destroy(os) + + try: + libogg.ogg_stream_check.restype = c_int + libogg.ogg_stream_check.argtypes = [os_p] + def ogg_stream_check(os): + return libogg.ogg_stream_check(os) + except: + pass + + libogg.ogg_stream_eos.restype = c_int + libogg.ogg_stream_eos.argtypes = [os_p] + + def ogg_stream_eos(os): + return libogg.ogg_stream_eos(os) + + + + libogg.ogg_page_checksum_set.restype = None + libogg.ogg_page_checksum_set.argtypes = [og_p] + + def ogg_page_checksum_set(og): + libogg.ogg_page_checksum_set(og) + + + + libogg.ogg_page_version.restype = c_int + libogg.ogg_page_version.argtypes = [og_p] + + def ogg_page_version(og): + return libogg.ogg_page_version(og) + + libogg.ogg_page_continued.restype = c_int + libogg.ogg_page_continued.argtypes = [og_p] + + def ogg_page_continued(og): + return libogg.ogg_page_continued(og) + + libogg.ogg_page_bos.restype = c_int + 
libogg.ogg_page_bos.argtypes = [og_p] + + def ogg_page_bos(og): + return libogg.ogg_page_bos(og) + + libogg.ogg_page_eos.restype = c_int + libogg.ogg_page_eos.argtypes = [og_p] + + def ogg_page_eos(og): + return libogg.ogg_page_eos(og) + + libogg.ogg_page_granulepos.restype = ogg_int64_t + libogg.ogg_page_granulepos.argtypes = [og_p] + + def ogg_page_granulepos(og): + return libogg.ogg_page_granulepos(og) + + libogg.ogg_page_serialno.restype = c_int + libogg.ogg_page_serialno.argtypes = [og_p] + + def ogg_page_serialno(og): + return libogg.ogg_page_serialno(og) + + libogg.ogg_page_pageno.restype = c_long + libogg.ogg_page_pageno.argtypes = [og_p] + + def ogg_page_pageno(og): + return libogg.ogg_page_pageno(og) + + libogg.ogg_page_packets.restype = c_int + libogg.ogg_page_packets.argtypes = [og_p] + + def ogg_page_packets(og): + return libogg.ogg_page_packets(og) + + + + libogg.ogg_packet_clear.restype = None + libogg.ogg_packet_clear.argtypes = [op_p] + + def ogg_packet_clear(op): + libogg.ogg_packet_clear(op) diff --git a/sbapp/pyogg/ogg_opus_writer.py b/sbapp/pyogg/ogg_opus_writer.py new file mode 100644 index 0000000..547d0f5 --- /dev/null +++ b/sbapp/pyogg/ogg_opus_writer.py @@ -0,0 +1,421 @@ +import builtins +import copy +import ctypes +import random +import struct +from typing import ( + Optional, + Union, + BinaryIO +) + +from . import ogg +from . import opus +from .opus_buffered_encoder import OpusBufferedEncoder +#from .opus_encoder import OpusEncoder +from .pyogg_error import PyOggError + +class OggOpusWriter(): + """Encodes PCM data into an OggOpus file.""" + + def __init__(self, + f: Union[BinaryIO, str], + encoder: OpusBufferedEncoder, + custom_pre_skip: Optional[int] = None) -> None: + """Construct an OggOpusWriter. + + f may be either a string giving the path to the file, or + an already-opened file handle. + + If f is an already-opened file handle, then it is the + user's responsibility to close the file when they are + finished with it. The file should be opened for writing + in binary (not text) mode. + + The encoder should be a + OpusBufferedEncoder and should be fully configured before the + first call to the `write()` method. + + The Opus encoder requires an amount of "warm up" and when + stored in an Ogg container that warm up can be skipped. When + `custom_pre_skip` is None, the required amount of warm up + silence is automatically calculated and inserted. If a custom + (non-silent) pre-skip is desired, then `custom_pre_skip` + should be specified as the number of samples (per channel). + It is then the user's responsibility to pass the non-silent + pre-skip samples to `encode()`. 
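+
+        A minimal usage sketch (illustrative only; the output file
+        name, sample rate, channel count, frame size and the `pcm`
+        variable below are assumptions):
+
+            encoder = OpusBufferedEncoder()
+            encoder.set_application("audio")
+            encoder.set_sampling_frequency(48000)
+            encoder.set_channels(2)
+            encoder.set_frame_size(20)  # milliseconds
+
+            writer = OggOpusWriter("output.opus", encoder)
+            writer.write(memoryview(bytearray(pcm)))  # 16-bit interleaved PCM
+            writer.close()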
+ + """ + # Store the Opus encoder + self._encoder = encoder + + # Store the custom pre skip + self._custom_pre_skip = custom_pre_skip + + # Create a new stream state with a random serial number + self._stream_state = self._create_stream_state() + + # Create a packet (reused for each pass) + self._ogg_packet = ogg.ogg_packet() + self._packet_valid = False + + # Create a page (reused for each pass) + self._ogg_page = ogg.ogg_page() + + # Counter for the number of packets written into Ogg stream + self._count_packets = 0 + + # Counter for the number of samples encoded into Opus + # packets + self._count_samples = 0 + + # Flag to indicate if the headers have been written + self._headers_written = False + + # Flag to indicate that the stream has been finished (the + # EOS bit was set in a final packet) + self._finished = False + + # Reference to the current encoded packet (written only + # when we know if it the last) + self._current_encoded_packet: Optional[bytes] = None + + # Open file if required. Given this may raise an exception, + # it should be the last step of initialisation. + self._i_opened_the_file = False + if isinstance(f, str): + self._file = builtins.open(f, 'wb') + self._i_opened_the_file = True + else: + # Assume it's already opened file + self._file = f + + def __del__(self) -> None: + if not self._finished: + self.close() + + # + # User visible methods + # + + def write(self, pcm: memoryview) -> None: + """Encode the PCM and write out the Ogg Opus stream. + + Encoders the PCM using the provided encoder. + + """ + # Check that the stream hasn't already been finished + if self._finished: + raise PyOggError( + "Stream has already ended. Perhaps close() was "+ + "called too early?") + + # If we haven't already written out the headers, do so + # now. Then, write a frame of silence to warm up the + # encoder. + if not self._headers_written: + pre_skip = self._write_headers(self._custom_pre_skip) + if self._custom_pre_skip is None: + self._write_silence(pre_skip) + + # Call the internal method to encode the bytes + self._write_to_oggopus(pcm) + + + def _write_to_oggopus(self, pcm: memoryview, flush: bool = False) -> None: + assert self._encoder is not None + + def handle_encoded_packet(encoded_packet: memoryview, + samples: int, + end_of_stream: bool) -> None: + # Cast memoryview to ctypes Array + Buffer = ctypes.c_ubyte * len(encoded_packet) + encoded_packet_ctypes = Buffer.from_buffer(encoded_packet) + + # Obtain a pointer to the encoded packet + encoded_packet_ptr = ctypes.cast( + encoded_packet_ctypes, + ctypes.POINTER(ctypes.c_ubyte) + ) + + # Increase the count of the number of samples written + self._count_samples += samples + + # Place data into the packet + self._ogg_packet.packet = encoded_packet_ptr + self._ogg_packet.bytes = len(encoded_packet) + self._ogg_packet.b_o_s = 0 + self._ogg_packet.e_o_s = end_of_stream + self._ogg_packet.granulepos = self._count_samples + self._ogg_packet.packetno = self._count_packets + + # Increase the counter of the number of packets + # in the stream + self._count_packets += 1 + + # Write the packet into the stream + self._write_packet() + + + # Encode the PCM data into an Opus packet + self._encoder.buffered_encode( + pcm, + flush=flush, + callback=handle_encoded_packet + ) + + def close(self) -> None: + # Check we haven't already closed this stream + if self._finished: + # We're attempting to close an already closed stream, + # do nothing more. 
+ return + + # Flush the underlying buffered encoder + self._write_to_oggopus(memoryview(bytearray(b"")), flush=True) + + # The current packet must be the end of the stream, update + # the packet's details + self._ogg_packet.e_o_s = 1 + + # Write the packet to the stream + if self._packet_valid: + self._write_packet() + + # Flush the stream of any unwritten pages + self._flush() + + # Mark the stream as finished + self._finished = True + + # Close the file if we opened it + if self._i_opened_the_file: + self._file.close() + self._i_opened_the_file = False + + # Clean up the Ogg-related memory + ogg.ogg_stream_clear(self._stream_state) + + # Clean up the reference to the encoded packet (as it must + # now have been written) + del self._current_encoded_packet + + # + # Internal methods + # + + def _create_random_serial_no(self) -> ctypes.c_int: + sizeof_c_int = ctypes.sizeof(ctypes.c_int) + min_int = -2**(sizeof_c_int*8-1) + max_int = 2**(sizeof_c_int*8-1)-1 + serial_no = ctypes.c_int(random.randint(min_int, max_int)) + + return serial_no + + def _create_stream_state(self) -> ogg.ogg_stream_state: + # Create a random serial number + serial_no = self._create_random_serial_no() + + # Create an ogg_stream_state + ogg_stream_state = ogg.ogg_stream_state() + + # Initialise the stream state + ogg.ogg_stream_init( + ctypes.pointer(ogg_stream_state), + serial_no + ) + + return ogg_stream_state + + def _make_identification_header(self, pre_skip: int, input_sampling_rate: int = 0) -> bytes: + """Make the OggOpus identification header. + + An input_sampling rate may be set to zero to mean 'unspecified'. + + Only channel mapping family 0 is currently supported. + This allows mono and stereo signals. + + See https://tools.ietf.org/html/rfc7845#page-12 for more + details. + + """ + signature = b"OpusHead" + version = 1 + output_channels = self._encoder._channels + output_gain = 0 + channel_mapping_family = 0 + data = struct.pack( + " int: + """ Returns pre-skip. """ + if custom_pre_skip is not None: + # Use the user-specified amount of pre-skip + pre_skip = custom_pre_skip + else: + # Obtain the algorithmic delay of the Opus encoder. See + # https://tools.ietf.org/html/rfc7845#page-27 + delay_samples = self._encoder.get_algorithmic_delay() + + # Extra samples are recommended. See + # https://tools.ietf.org/html/rfc7845#page-27 + extra_samples = 120 + + # We will just fill a whole frame with silence. Calculate + # the minimum frame length, which we'll use as the + # pre-skip. 
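+            # As an illustrative example (numbers assumed, not
+            # measured): if the encoder reports a lookahead of 312
+            # samples, adding the 120 extra samples gives 432, and
+            # the shortest frame at 48 kHz longer than that is the
+            # 10 ms frame of 480 samples, which becomes the pre-skip.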
+ frame_durations = [2.5, 5, 10, 20, 40, 60] # milliseconds + frame_lengths = [ + x * self._encoder._samples_per_second // 1000 + for x in frame_durations + ] + for frame_length in frame_lengths: + if frame_length > delay_samples + extra_samples: + pre_skip = frame_length + break + + # Create the identification header + id_header = self._make_identification_header( + pre_skip = pre_skip + ) + + # Specify the packet containing the identification header + self._ogg_packet.packet = ctypes.cast(id_header, ogg.c_uchar_p) # type: ignore + self._ogg_packet.bytes = len(id_header) + self._ogg_packet.b_o_s = 1 + self._ogg_packet.e_o_s = 0 + self._ogg_packet.granulepos = 0 + self._ogg_packet.packetno = self._count_packets + self._count_packets += 1 + + # Write the identification header + result = ogg.ogg_stream_packetin( + self._stream_state, + self._ogg_packet + ) + + if result != 0: + raise PyOggError( + "Failed to write Opus identification header" + ) + + return pre_skip + + def _make_comment_header(self): + """Make the OggOpus comment header. + + See https://tools.ietf.org/html/rfc7845#page-22 for more + details. + + """ + signature = b"OpusTags" + vendor_string = b"ENCODER=PyOgg" + vendor_string_length = struct.pack(" None: + super().__init__() + + self._frame_size_ms: Optional[float] = None + self._frame_size_bytes: Optional[int] = None + + # Buffer contains the bytes required for the next + # frame. + self._buffer: Optional[ctypes.Array] = None + + # Location of the next free byte in the buffer + self._buffer_index = 0 + + + def set_frame_size(self, frame_size: float) -> None: + """ Set the desired frame duration (in milliseconds). + + Valid options are 2.5, 5, 10, 20, 40, or 60ms. + + """ + + # Ensure the frame size is valid. Compare frame size in + # units of 0.1ms to avoid floating point comparison + if int(frame_size*10) not in [25, 50, 100, 200, 400, 600]: + raise PyOggError( + "Frame size ({:f}) not one of ".format(frame_size)+ + "the acceptable values" + ) + + self._frame_size_ms = frame_size + + self._calc_frame_size() + + + def set_sampling_frequency(self, samples_per_second: int) -> None: + super().set_sampling_frequency(samples_per_second) + self._calc_frame_size() + + + def buffered_encode(self, + pcm_bytes: memoryview, + flush: bool = False, + callback: Callable[[memoryview,int,bool],None] = None + ) -> List[Tuple[memoryview, int, bool]]: + """Gets encoded packets and their number of samples. + + This method returns a list, where each item in the list is + a tuple. The first item in the tuple is an Opus-encoded + frame stored as a bytes-object. The second item in the + tuple is the number of samples encoded (excluding + silence). + + If `callback` is supplied then this method will instead + return an empty list but call the callback for every + Opus-encoded frame that would have been returned as a + list. This option has the desireable property of + eliminating the copying of the encoded packets, which is + required in order to form a list. The callback should + take two arguments, the encoded frame (a Python bytes + object) and the number of samples encoded per channel (an + int). The user must either process or copy the data as + the data may be overwritten once the callback terminates. 
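+
+        As an illustrative sketch (the names used here are
+        assumptions), a callback matching the annotation
+        Callable[[memoryview, int, bool], None] might look like:
+
+            def on_packet(encoded_packet, samples, end_of_stream):
+                # Copy immediately; the underlying memory is reused
+                packets.append((bytes(encoded_packet), samples))
+
+            encoder.buffered_encode(memoryview(pcm), callback=on_packet)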
+ + """ + # If there's no work to do return immediately + if len(pcm_bytes) == 0 and flush == False: + return [] # no work to do + + # Sanity checks + if self._frame_size_ms is None: + raise PyOggError("Frame size must be set before encoding") + assert self._frame_size_bytes is not None + assert self._channels is not None + assert self._buffer is not None + assert self._buffer_index is not None + + # Local variable initialisation + results = [] + pcm_index = 0 + pcm_len = len(pcm_bytes) + + # 'Cast' memoryview of PCM to ctypes Array + Buffer = ctypes.c_ubyte * len(pcm_bytes) + try: + pcm_ctypes = Buffer.from_buffer(pcm_bytes) + except TypeError: + warnings.warn( + "Because PCM was read-only, an extra memory "+ + "copy was required; consider storing PCM in "+ + "writable memory (for example, bytearray "+ + "rather than bytes)." + ) + pcm_ctypes = Buffer.from_buffer(pcm_bytes) + + # Either store the encoded packet to return at the end of the + # method or immediately call the callback with the encoded + # packet. + def store_or_callback(encoded_packet: memoryview, + samples: int, + end_of_stream: bool = False) -> None: + if callback is None: + # Store the result + results.append(( + encoded_packet, + samples, + end_of_stream + )) + else: + # Call the callback + callback( + encoded_packet, + samples, + end_of_stream + ) + + # Fill the remainder of the buffer with silence and encode it. + # The associated number of samples are only that of actual + # data, not the added silence. + def flush_buffer() -> None: + # Sanity checks to satisfy mypy + assert self._buffer_index is not None + assert self._channels is not None + assert self._buffer is not None + + # If the buffer is already empty, we have no work to do + if self._buffer_index == 0: + return + + # Store the number of samples currently in the buffer + samples = ( + self._buffer_index + // self._channels + // ctypes.sizeof(opus.opus_int16) + ) + + # Fill the buffer with silence + ctypes.memset( + # destination + ctypes.byref(self._buffer, self._buffer_index), + # value + 0, + # count + len(self._buffer) - self._buffer_index + ) + + # Encode the PCM + # As at 2020-11-05, mypy is unaware that ctype Arrays + # support the buffer protocol. + encoded_packet = self.encode(memoryview(self._buffer)) # type: ignore + + # Either store the encoded packet or call the + # callback + store_or_callback(encoded_packet, samples, True) + + + # Copy the data remaining from the provided PCM into the + # buffer. Flush if required. + def copy_insufficient_data() -> None: + # Sanity checks to satisfy mypy + assert self._buffer is not None + + # Calculate remaining data + remaining_data = len(pcm_bytes) - pcm_index + + # Copy the data into the buffer. + ctypes.memmove( + # destination + ctypes.byref(self._buffer, self._buffer_index), + # source + ctypes.byref(pcm_ctypes, pcm_index), + # count + remaining_data + ) + + self._buffer_index += remaining_data + + # If we've been asked to flush the buffer then do so + if flush: + flush_buffer() + + # Loop through the provided PCM and the current buffer, + # encoding as we have full packets. + while True: + # There are two possibilities at this point: either we + # have previously unencoded data still in the buffer or we + # do not + if self._buffer_index == 0: + # We do not have unencoded data + + # We are free to progress through the PCM that has + # been provided encoding frames without copying any + # bytes. 
Once there is insufficient data remaining + # for a complete frame, that data should be copied + # into the buffer and we have finished. + if pcm_len - pcm_index > self._frame_size_bytes: + # We have enough data remaining in the provided + # PCM to encode more than an entire frame without + # copying any data. Unfortunately, splicing a + # ctypes array copies the array. To avoid the + # copy we use memoryview see + # https://mattgwwalker.wordpress.com/2020/12/12/python-ctypes-slicing/ + frame_data = memoryview(pcm_bytes)[ + pcm_index:pcm_index+self._frame_size_bytes + ] + + # Update the PCM index + pcm_index += self._frame_size_bytes + + # Store number of samples (per channel) of actual + # data + samples = ( + len(frame_data) + // self._channels + // ctypes.sizeof(opus.opus_int16) + ) + + # Encode the PCM + encoded_packet = super().encode(frame_data) + + # Either store the encoded packet or call the + # callback + store_or_callback(encoded_packet, samples) + + else: + # We do not have enough data to fill a frame while + # still having data left over. Copy the data into + # the buffer. + copy_insufficient_data() + return results + + else: + # We have unencoded data. + + # Copy the provided PCM into the buffer (up until the + # buffer is full). If we can fill it, then we can + # encode the filled buffer and continue. If we can't + # fill it then we've finished. + data_required = len(self._buffer) - self._buffer_index + if pcm_len > data_required: + # We have sufficient data to fill the buffer and + # have data left over. Copy data into the buffer. + assert pcm_index == 0 + remaining = len(self._buffer) - self._buffer_index + ctypes.memmove( + # destination + ctypes.byref(self._buffer, self._buffer_index), + # source + pcm_ctypes, + # count + remaining + ) + pcm_index += remaining + self._buffer_index += remaining + assert self._buffer_index == len(self._buffer) + + # Encode the PCM + encoded_packet = super().encode( + # Memoryviews of ctypes do work, even though + # mypy complains. + memoryview(self._buffer) # type: ignore + ) + + # Store number of samples (per channel) of actual + # data + samples = ( + self._buffer_index + // self._channels + // ctypes.sizeof(opus.opus_int16) + ) + + # We've now processed the buffer + self._buffer_index = 0 + + # Either store the encoded packet or call the + # callback + store_or_callback(encoded_packet, samples) + else: + # We have insufficient data to fill the buffer + # while still having data left over. Copy the + # data into the buffer. + copy_insufficient_data() + return results + + + def _calc_frame_size(self): + """Calculates the number of bytes in a frame. + + If the frame size (in milliseconds) and the number of + samples per seconds have already been specified, then the + frame size in bytes is set. Otherwise, this method does + nothing. + + The frame size is measured in bytes required to store the + sample. + + """ + if (self._frame_size_ms is None + or self._samples_per_second is None): + return + + self._frame_size_bytes = ( + self._frame_size_ms + * self._samples_per_second + // 1000 + * ctypes.sizeof(opus.opus_int16) + * self._channels + ) + + # Allocate space for the buffer + Buffer = ctypes.c_ubyte * self._frame_size_bytes + self._buffer = Buffer() + + + def _get_next_frame(self, add_silence=False): + """Gets the next Opus-encoded frame. + + Returns a tuple where the first item is the Opus-encoded + frame and the second item is the number of encoded samples + (per channel). + + Returns None if insufficient data is available. 
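+
+        As a worked example (values assumed for illustration): a
+        20 ms frame of 16-bit stereo PCM at 48 kHz requires
+        960 * 2 * 2 = 3840 bytes. If only 1000 bytes remain in the
+        buffer and add_silence is True, 2840 zero bytes are appended
+        and the returned sample count is 1000 // 2 // 2 = 250 per
+        channel.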
+ + """ + next_frame = bytes() + samples = 0 + + # Ensure frame size has been specified + if self._frame_size_bytes is None: + raise PyOggError( + "Desired frame size hasn't been set. Perhaps "+ + "encode() was called before set_frame_size() "+ + "and set_sampling_frequency()?" + ) + + # Check if there's insufficient data in the buffer to fill + # a frame. + if self._frame_size_bytes > self._buffer_size: + if len(self._buffer) == 0: + # No data at all in buffer + return None + if add_silence: + # Get all remaining data + while len(self._buffer) != 0: + next_frame += self._buffer.popleft() + self._buffer_size = 0 + # Store number of samples (per channel) of actual + # data + samples = ( + len(next_frame) + // self._channels + // ctypes.sizeof(opus.opus_int16) + ) + # Fill remainder of frame with silence + bytes_remaining = self._frame_size_bytes - len(next_frame) + next_frame += b'\x00' * bytes_remaining + return (next_frame, samples) + else: + # Insufficient data to fill a frame and we're not + # adding silence + return None + + bytes_remaining = self._frame_size_bytes + while bytes_remaining > 0: + if len(self._buffer[0]) <= bytes_remaining: + # Take the whole first item + buffer_ = self._buffer.popleft() + next_frame += buffer_ + bytes_remaining -= len(buffer_) + self._buffer_size -= len(buffer_) + else: + # Take only part of the buffer + + # TODO: This could be more efficiently + # implemented. Rather than appending back the + # remaining data, we could just update an index + # saying where we were up to in regards to the + # first entry of the buffer. + buffer_ = self._buffer.popleft() + next_frame += buffer_[:bytes_remaining] + self._buffer_size -= bytes_remaining + # And put the unused part back into the buffer + self._buffer.appendleft(buffer_[bytes_remaining:]) + bytes_remaining = 0 + + # Calculate number of samples (per channel) + samples = ( + len(next_frame) + // self._channels + // ctypes.sizeof(opus.opus_int16) + ) + + return (next_frame, samples) diff --git a/sbapp/pyogg/opus_decoder.py b/sbapp/pyogg/opus_decoder.py new file mode 100644 index 0000000..8a1f4dd --- /dev/null +++ b/sbapp/pyogg/opus_decoder.py @@ -0,0 +1,273 @@ +import ctypes + +from . import opus +from .pyogg_error import PyOggError + +class OpusDecoder: + def __init__(self): + self._decoder = None + self._channels = None + self._samples_per_second = None + self._pcm_buffer = None + self._pcm_buffer_ptr = None + self._pcm_buffer_size_int = None + + # TODO: Check if there is clean up that we need to do when + # closing a decoder. + + # + # User visible methods + # + + def set_channels(self, n): + + """Set the number of channels. + + n must be either 1 or 2. + + The decoder is capable of filling in either mono or + interleaved stereo pcm buffers. + + """ + if self._decoder is None: + if n < 0 or n > 2: + raise PyOggError( + "Invalid number of channels in call to "+ + "set_channels()" + ) + self._channels = n + else: + raise PyOggError( + "Cannot change the number of channels after "+ + "the decoder was created. Perhaps "+ + "set_channels() was called after decode()?" + ) + self._create_pcm_buffer() + + def set_sampling_frequency(self, samples_per_second): + """Set the number of samples (per channel) per second. + + samples_per_second must be one of 8000, 12000, 16000, + 24000, or 48000. + + Internally Opus stores data at 48000 Hz, so that should be + the default value for Fs. 
However, the decoder can + efficiently decode to buffers at 8, 12, 16, and 24 kHz so + if for some reason the caller cannot use data at the full + sample rate, or knows the compressed data doesn't use the + full frequency range, it can request decoding at a reduced + rate. + + """ + if self._decoder is None: + if samples_per_second in [8000, 12000, 16000, 24000, 48000]: + self._samples_per_second = samples_per_second + else: + raise PyOggError( + "Specified sampling frequency "+ + "({:d}) ".format(samples_per_second)+ + "was not one of the accepted values" + ) + else: + raise PyOggError( + "Cannot change the sampling frequency after "+ + "the decoder was created. Perhaps "+ + "set_sampling_frequency() was called after decode()?" + ) + self._create_pcm_buffer() + + def decode(self, encoded_bytes: memoryview): + """Decodes an Opus-encoded packet into PCM. + + """ + # If we haven't already created a decoder, do so now + if self._decoder is None: + self._decoder = self._create_decoder() + + # Create a ctypes array from the memoryview (without copying + # data) + Buffer = ctypes.c_char * len(encoded_bytes) + encoded_bytes_ctypes = Buffer.from_buffer(encoded_bytes) + + # Create pointer to encoded bytes + encoded_bytes_ptr = ctypes.cast( + encoded_bytes_ctypes, + ctypes.POINTER(ctypes.c_ubyte) + ) + + # Store length of encoded bytes into int32 + len_int32 = opus.opus_int32( + len(encoded_bytes) + ) + + # Check that we have a PCM buffer + if self._pcm_buffer is None: + raise PyOggError("PCM buffer was not configured.") + + # Decode the encoded frame + result = opus.opus_decode( + self._decoder, + encoded_bytes_ptr, + len_int32, + self._pcm_buffer_ptr, + self._pcm_buffer_size_int, + 0 # TODO: What's Forward Error Correction about? + ) + + # Check for any errors + if result < 0: + raise PyOggError( + "An error occurred while decoding an Opus-encoded "+ + "packet: "+ + opus.opus_strerror(result).decode("utf") + ) + + # Extract just the valid data as bytes + end_valid_data = ( + result + * ctypes.sizeof(opus.opus_int16) + * self._channels + ) + + # Create memoryview of PCM buffer to avoid copying data during slice. + mv = memoryview(self._pcm_buffer) + + # Cast memoryview to chars + mv = mv.cast('c') + + # Slice memoryview to extract only valid data + mv = mv[:end_valid_data] + + return mv + + + def decode_missing_packet(self, frame_duration): + """ Obtain PCM data despite missing a frame. + + frame_duration is in milliseconds. + + """ + + # Consider frame duration in units of 0.1ms in order to + # avoid floating-point comparisons. + if int(frame_duration*10) not in [25, 50, 100, 200, 400, 600]: + raise PyOggError( + "Frame duration ({:f}) is not one of the accepted values".format(frame_duration) + ) + + # Calculate frame size + frame_size = int( + frame_duration + * self._samples_per_second + // 1000 + ) + + # Store frame size as int + frame_size_int = ctypes.c_int(frame_size) + + # Decode missing packet + result = opus.opus_decode( + self._decoder, + None, + 0, + self._pcm_buffer_ptr, + frame_size_int, + 0 # TODO: What is this Forward Error Correction about? 
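+            # (This last argument is libopus' decode_fec flag: passing
+            # 1 along with the *next* packet asks the decoder to
+            # recover the lost frame from in-band FEC data, while 0
+            # decodes normally / uses packet-loss concealment.)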
+ ) + + # Check for any errors + if result < 0: + raise PyOggError( + "An error occurred while decoding an Opus-encoded "+ + "packet: "+ + opus.opus_strerror(result).decode("utf") + ) + + # Extract just the valid data as bytes + end_valid_data = ( + result + * ctypes.sizeof(opus.opus_int16) + * self._channels + ) + return bytes(self._pcm_buffer)[:end_valid_data] + + # + # Internal methods + # + + def _create_pcm_buffer(self): + if (self._samples_per_second is None + or self._channels is None): + # We cannot define the buffer yet + return + + # Create buffer to hold 120ms of samples. See "opus_decode()" at + # https://opus-codec.org/docs/opus_api-1.3.1/group__opus__decoder.html + max_duration = 120 # milliseconds + max_samples = max_duration * self._samples_per_second // 1000 + PCMBuffer = opus.opus_int16 * (max_samples * self._channels) + self._pcm_buffer = PCMBuffer() + self._pcm_buffer_ptr = ( + ctypes.cast(ctypes.pointer(self._pcm_buffer), + ctypes.POINTER(opus.opus_int16)) + ) + + # Store samples per channel in an int + self._pcm_buffer_size_int = ctypes.c_int(max_samples) + + def _create_decoder(self): + # To create a decoder, we must first allocate resources for it. + # We want Python to be responsible for the memory deallocation, + # and thus Python must be responsible for the initial memory + # allocation. + + # Check that the sampling frequency has been defined + if self._samples_per_second is None: + raise PyOggError( + "The sampling frequency was not specified before "+ + "attempting to create an Opus decoder. Perhaps "+ + "decode() was called before set_sampling_frequency()?" + ) + + # The sampling frequency must be passed in as a 32-bit int + samples_per_second = opus.opus_int32(self._samples_per_second) + + # Check that the number of channels has been defined + if self._channels is None: + raise PyOggError( + "The number of channels were not specified before "+ + "attempting to create an Opus decoder. Perhaps "+ + "decode() was called before set_channels()?" + ) + + # The number of channels must also be passed in as a 32-bit int + channels = opus.opus_int32(self._channels) + + # Obtain the number of bytes of memory required for the decoder + size = opus.opus_decoder_get_size(channels); + + # Allocate the required memory for the decoder + memory = ctypes.create_string_buffer(size) + + # Cast the newly-allocated memory as a pointer to a decoder. We + # could also have used opus.od_p as the pointer type, but writing + # it out in full may be clearer. + decoder = ctypes.cast(memory, ctypes.POINTER(opus.OpusDecoder)) + + # Initialise the decoder + error = opus.opus_decoder_init( + decoder, + samples_per_second, + channels + ); + + # Check that there hasn't been an error when initialising the + # decoder + if error != opus.OPUS_OK: + raise PyOggError( + "An error occurred while creating the decoder: "+ + opus.opus_strerror(error).decode("utf") + ) + + # Return our newly-created decoder + return decoder diff --git a/sbapp/pyogg/opus_encoder.py b/sbapp/pyogg/opus_encoder.py new file mode 100644 index 0000000..1da82da --- /dev/null +++ b/sbapp/pyogg/opus_encoder.py @@ -0,0 +1,358 @@ +import ctypes +from typing import Optional, Union, ByteString + +from . 
import opus +from .pyogg_error import PyOggError + +class OpusEncoder: + """Encodes PCM data into Opus frames.""" + def __init__(self) -> None: + self._encoder: Optional[ctypes.pointer] = None + self._channels: Optional[int] = None + self._samples_per_second: Optional[int] = None + self._application: Optional[int] = None + self._max_bytes_per_frame: Optional[opus.opus_int32] = None + self._output_buffer: Optional[ctypes.Array] = None + self._output_buffer_ptr: Optional[ctypes.pointer] = None + + # An output buffer of 4,000 bytes is recommended in + # https://opus-codec.org/docs/opus_api-1.3.1/group__opus__encoder.html + self.set_max_bytes_per_frame(4000) + + # + # User visible methods + # + + def set_channels(self, n: int) -> None: + """Set the number of channels. + + n must be either 1 or 2. + + """ + if self._encoder is None: + if n < 0 or n > 2: + raise PyOggError( + "Invalid number of channels in call to "+ + "set_channels()" + ) + self._channels = n + else: + raise PyOggError( + "Cannot change the number of channels after "+ + "the encoder was created. Perhaps "+ + "set_channels() was called after encode()?" + ) + + def set_sampling_frequency(self, samples_per_second: int) -> None: + """Set the number of samples (per channel) per second. + + This must be one of 8000, 12000, 16000, 24000, or 48000. + + Regardless of the sampling rate and number of channels + selected, the Opus encoder can switch to a lower audio + bandwidth or number of channels if the bitrate selected is + too low. This also means that it is safe to always use 48 + kHz stereo input and let the encoder optimize the + encoding. + + """ + if self._encoder is None: + if samples_per_second in [8000, 12000, 16000, 24000, 48000]: + self._samples_per_second = samples_per_second + else: + raise PyOggError( + "Specified sampling frequency "+ + "({:d}) ".format(samples_per_second)+ + "was not one of the accepted values" + ) + else: + raise PyOggError( + "Cannot change the sampling frequency after "+ + "the encoder was created. Perhaps "+ + "set_sampling_frequency() was called after encode()?" + ) + + def set_application(self, application: str) -> None: + """Set the encoding mode. + + This must be one of 'voip', 'audio', or 'restricted_lowdelay'. + + 'voip': Gives best quality at a given bitrate for voice + signals. It enhances the input signal by high-pass + filtering and emphasizing formants and + harmonics. Optionally it includes in-band forward error + correction to protect against packet loss. Use this mode + for typical VoIP applications. Because of the enhancement, + even at high bitrates the output may sound different from + the input. + + 'audio': Gives best quality at a given bitrate for most + non-voice signals like music. Use this mode for music and + mixed (music/voice) content, broadcast, and applications + requiring less than 15 ms of coding delay. + + 'restricted_lowdelay': configures low-delay mode that + disables the speech-optimized mode in exchange for + slightly reduced delay. This mode can only be set on an + newly initialized encoder because it changes the codec + delay. + """ + if self._encoder is not None: + raise PyOggError( + "Cannot change the application after "+ + "the encoder was created. Perhaps "+ + "set_application() was called after encode()?" 
+ ) + if application == "voip": + self._application = opus.OPUS_APPLICATION_VOIP + elif application == "audio": + self._application = opus.OPUS_APPLICATION_AUDIO + elif application == "restricted_lowdelay": + self._application = opus.OPUS_APPLICATION_RESTRICTED_LOWDELAY + else: + raise PyOggError( + "The application specification '{:s}' ".format(application)+ + "wasn't one of the accepted values." + ) + + def set_max_bytes_per_frame(self, max_bytes: int) -> None: + """Set the maximum number of bytes in an encoded frame. + + Size of the output payload. This may be used to impose an + upper limit on the instant bitrate, but should not be used + as the only bitrate control. + + TODO: Use OPUS_SET_BITRATE to control the bitrate. + + """ + self._max_bytes_per_frame = opus.opus_int32(max_bytes) + OutputBuffer = ctypes.c_ubyte * max_bytes + self._output_buffer = OutputBuffer() + self._output_buffer_ptr = ( + ctypes.cast(ctypes.pointer(self._output_buffer), + ctypes.POINTER(ctypes.c_ubyte)) + ) + + + def encode(self, pcm: Union[bytes, bytearray, memoryview]) -> memoryview: + """Encodes PCM data into an Opus frame. + + `pcm` must be formatted as bytes-like, with each sample taking + two bytes (signed 16-bit integers; interleaved left, then + right channels if in stereo). + + If `pcm` is not writeable, a copy of the array will be made. + + """ + # If we haven't already created an encoder, do so now + if self._encoder is None: + self._encoder = self._create_encoder() + + # Sanity checks also satisfy mypy type checking + assert self._channels is not None + assert self._samples_per_second is not None + assert self._output_buffer is not None + + # Calculate the effective frame duration of the given PCM + # data. Calculate it in units of 0.1ms in order to avoid + # floating point comparisons. + bytes_per_sample = 2 + frame_size = ( + len(pcm) # bytes + // bytes_per_sample + // self._channels + ) + frame_duration = ( + (10*frame_size) + // (self._samples_per_second//1000) + ) + + # Check that we have a valid frame size + if int(frame_duration) not in [25, 50, 100, 200, 400, 600]: + raise PyOggError( + "The effective frame duration ({:.1f} ms) " + .format(frame_duration/10)+ + "was not one of the acceptable values." + ) + + # Create a ctypes object sharing the memory of the PCM data + PcmCtypes = ctypes.c_ubyte * len(pcm) + try: + # Attempt to share the PCM memory + + # Unfortunately, as at 2020-09-27, the type hinting for + # read-only and writeable buffer protocols was a + # work-in-progress. The following only works for writable + # cases, but the method's parameters include a read-only + # possibility (bytes), thus we ignore mypy's error. + pcm_ctypes = PcmCtypes.from_buffer(pcm) # type: ignore[arg-type] + except TypeError: + # The data must be copied if it's not writeable + pcm_ctypes = PcmCtypes.from_buffer_copy(pcm) + + # Create a pointer to the PCM data + pcm_ptr = ctypes.cast( + pcm_ctypes, + ctypes.POINTER(opus.opus_int16) + ) + + # Create an int giving the frame size per channel + frame_size_int = ctypes.c_int(frame_size) + + # Encode PCM + result = opus.opus_encode( + self._encoder, + pcm_ptr, + frame_size_int, + self._output_buffer_ptr, + self._max_bytes_per_frame + ) + + # Check for any errors + if result < 0: + raise PyOggError( + "An error occurred while encoding to Opus format: "+ + opus.opus_strerror(result).decode("utf") + ) + + # Get memoryview of buffer so that the slice operation doesn't + # copy the data. 
+ # + # Unfortunately, as at 2020-09-27, the type hints for + # memoryview do not include ctype arrays. This is because + # there is no currently accepted manner to label a class as + # supporting the buffer protocol. However, it's clearly a + # work in progress. For more information, see: + # * https://bugs.python.org/issue27501 + # * https://github.com/python/typing/issues/593 + # * https://github.com/python/typeshed/pull/4232 + mv = memoryview(self._output_buffer) # type: ignore + + # Cast the memoryview to char + mv = mv.cast('c') + + # Slice just the valid data from the memoryview + valid_data_as_bytes = mv[:result] + + # DEBUG + # Convert memoryview back to ctypes instance + Buffer = ctypes.c_ubyte * len(valid_data_as_bytes) + buf = Buffer.from_buffer( valid_data_as_bytes ) + + # Convert PCM back to pointer and dump 4,000-byte buffer + ptr = ctypes.cast( + buf, + ctypes.POINTER(ctypes.c_ubyte) + ) + + return valid_data_as_bytes + + + def get_algorithmic_delay(self): + """Gets the total samples of delay added by the entire codec. + + This can be queried by the encoder and then the provided + number of samples can be skipped on from the start of the + decoder's output to provide time aligned input and + output. From the perspective of a decoding application the + real data begins this many samples late. + + The decoder contribution to this delay is identical for all + decoders, but the encoder portion of the delay may vary from + implementation to implementation, version to version, or even + depend on the encoder's initial configuration. Applications + needing delay compensation should call this method rather than + hard-coding a value. + + """ + # If we haven't already created an encoder, do so now + if self._encoder is None: + self._encoder = self._create_encoder() + + # Obtain the algorithmic delay of the Opus encoder. See + # https://tools.ietf.org/html/rfc7845#page-27 + delay = opus.opus_int32() + + result = opus.opus_encoder_ctl( + self._encoder, + opus.OPUS_GET_LOOKAHEAD_REQUEST, + ctypes.pointer(delay) + ) + if result != opus.OPUS_OK: + raise PyOggError( + "Failed to obtain the algorithmic delay of "+ + "the Opus encoder: "+ + opus.opus_strerror(result).decode("utf") + ) + delay_samples = delay.value + return delay_samples + + + # + # Internal methods + # + + def _create_encoder(self) -> ctypes.pointer: + # To create an encoder, we must first allocate resources for it. + # We want Python to be responsible for the memory deallocation, + # and thus Python must be responsible for the initial memory + # allocation. + + # Check that the application has been defined + if self._application is None: + raise PyOggError( + "The application was not specified before "+ + "attempting to create an Opus encoder. Perhaps "+ + "encode() was called before set_application()?" + ) + application = self._application + + # Check that the sampling frequency has been defined + if self._samples_per_second is None: + raise PyOggError( + "The sampling frequency was not specified before "+ + "attempting to create an Opus encoder. Perhaps "+ + "encode() was called before set_sampling_frequency()?" + ) + + # The frequency must be passed in as a 32-bit int + samples_per_second = opus.opus_int32(self._samples_per_second) + + # Check that the number of channels has been defined + if self._channels is None: + raise PyOggError( + "The number of channels were not specified before "+ + "attempting to create an Opus encoder. Perhaps "+ + "encode() was called before set_channels()?" 
+ ) + channels = self._channels + + # Obtain the number of bytes of memory required for the encoder + size = opus.opus_encoder_get_size(channels); + + # Allocate the required memory for the encoder + memory = ctypes.create_string_buffer(size) + + # Cast the newly-allocated memory as a pointer to an encoder. We + # could also have used opus.oe_p as the pointer type, but writing + # it out in full may be clearer. + encoder = ctypes.cast(memory, ctypes.POINTER(opus.OpusEncoder)) + + # Initialise the encoder + error = opus.opus_encoder_init( + encoder, + samples_per_second, + channels, + application + ) + + # Check that there hasn't been an error when initialising the + # encoder + if error != opus.OPUS_OK: + raise PyOggError( + "An error occurred while creating the encoder: "+ + opus.opus_strerror(error).decode("utf") + ) + + # Return our newly-created encoder + return encoder diff --git a/sbapp/pyogg/opus_file.py b/sbapp/pyogg/opus_file.py new file mode 100644 index 0000000..f8519f4 --- /dev/null +++ b/sbapp/pyogg/opus_file.py @@ -0,0 +1,106 @@ +import ctypes + +from . import ogg +from . import opus +from .pyogg_error import PyOggError +from .audio_file import AudioFile + +class OpusFile(AudioFile): + def __init__(self, path: str) -> None: + # Open the file + error = ctypes.c_int() + of = opus.op_open_file( + ogg.to_char_p(path), + ctypes.pointer(error) + ) + + # Check for errors + if error.value != 0: + raise PyOggError( + ("File '{}' couldn't be opened or doesn't exist. "+ + "Error code: {}").format(path, error.value) + ) + + # Extract the number of channels in the newly opened file + #: Number of channels in audio file. + self.channels = opus.op_channel_count(of, -1) + + # Allocate sufficient memory to store the entire PCM + pcm_size = opus.op_pcm_total(of, -1) + Buf = opus.opus_int16*(pcm_size*self.channels) + buf = Buf() + + # Create a pointer to the newly allocated memory. It + # seems we can only do pointer arithmetic on void + # pointers. See + # https://mattgwwalker.wordpress.com/2020/05/30/pointer-manipulation-in-python/ + buf_ptr = ctypes.cast( + ctypes.pointer(buf), + ctypes.c_void_p + ) + assert buf_ptr.value is not None # for mypy + buf_ptr_zero = buf_ptr.value + + #: Bytes per sample + self.bytes_per_sample = ctypes.sizeof(opus.opus_int16) + + # Read through the entire file, copying the PCM into the + # buffer + samples = 0 + while True: + # Calculate remaining buffer size + remaining_buffer = ( + len(buf) # int + - (buf_ptr.value + - buf_ptr_zero) // self.bytes_per_sample + ) + + # Convert buffer pointer to the desired type + ptr = ctypes.cast( + buf_ptr, + ctypes.POINTER(opus.opus_int16) + ) + + # Read the next section of PCM + ns = opus.op_read( + of, + ptr, + remaining_buffer, + ogg.c_int_p() + ) + + # Check for errors + if ns<0: + raise PyOggError( + "Error while reading OggOpus file. "+ + "Error code: {}".format(ns) + ) + + # Increment the pointer + buf_ptr.value += ( + ns + * self.bytes_per_sample + * self.channels + ) + assert buf_ptr.value is not None # for mypy + + samples += ns + + # Check if we've finished + if ns==0: + break + + # Close the open file + opus.op_free(of) + + # Opus files are always stored at 48k samples per second + #: Number of samples per second (per channel). Always 48,000. + self.frequency = 48000 + + # Cast buffer to a one-dimensional array of chars + #: Raw PCM data from audio file. 
+ CharBuffer = ( + ctypes.c_byte + * (self.bytes_per_sample * self.channels * pcm_size) + ) + self.buffer = CharBuffer.from_buffer(buf) diff --git a/sbapp/pyogg/opus_file_stream.py b/sbapp/pyogg/opus_file_stream.py new file mode 100644 index 0000000..b3e1723 --- /dev/null +++ b/sbapp/pyogg/opus_file_stream.py @@ -0,0 +1,127 @@ +import ctypes + +from . import ogg +from . import opus +from .pyogg_error import PyOggError + +class OpusFileStream: + def __init__(self, path): + """Opens an OggOpus file as a stream. + + path should be a string giving the filename of the file to + open. Unicode file names may not work correctly. + + An exception will be raised if the file cannot be opened + correctly. + + """ + error = ctypes.c_int() + + self.of = opus.op_open_file(ogg.to_char_p(path), ctypes.pointer(error)) + + if error.value != 0: + self.of = None + raise PyOggError("file couldn't be opened or doesn't exist. Error code : {}".format(error.value)) + + #: Number of channels in audio file + self.channels = opus.op_channel_count(self.of, -1) + + #: Total PCM Length + self.pcm_size = opus.op_pcm_total(self.of, -1) + + #: Number of samples per second (per channel) + self.frequency = 48000 + + # The buffer size should be (per channel) large enough to + # hold 120ms (the largest possible Opus frame) at 48kHz. + # See https://opus-codec.org/docs/opusfile_api-0.7/group__stream__decoding.html#ga963c917749335e29bb2b698c1cb20a10 + self.buffer_size = self.frequency // 1000 * 120 * self.channels + self.Buf = opus.opus_int16 * self.buffer_size + self._buf = self.Buf() + self.buffer_ptr = ctypes.cast( + ctypes.pointer(self._buf), + opus.opus_int16_p + ) + + #: Bytes per sample + self.bytes_per_sample = ctypes.sizeof(opus.opus_int16) + + def __del__(self): + if self.of is not None: + opus.op_free(self.of) + + def get_buffer(self): + """Obtains the next frame of PCM samples. + + Returns an array of signed 16-bit integers. If the file + is in stereo, the left and right channels are interleaved. + + Returns None when all data has been read. + + The array that is returned should be either processed or + copied before the next call to :meth:`~get_buffer` or + :meth:`~get_buffer_as_array` as the array's memory is reused for + each call. + + """ + # Read the next frame + samples_read = opus.op_read( + self.of, + self.buffer_ptr, + self.buffer_size, + None + ) + + # Check for errors + if samples_read < 0: + raise PyOggError( + "Failed to read OpusFileStream. Error {:d}".format(samples_read) + ) + + # Check if we've reached the end of the stream + if samples_read == 0: + return None + + # Cast the pointer to opus_int16 to an array of the + # correct size + result_ptr = ctypes.cast( + self.buffer_ptr, + ctypes.POINTER(opus.opus_int16 * (samples_read*self.channels)) + ) + + # Convert the array to Python bytes + return bytes(result_ptr.contents) + + def get_buffer_as_array(self): + """Provides the buffer as a NumPy array. + + Note that the underlying data type is 16-bit signed + integers. + + Does not copy the underlying data, so the returned array + should either be processed or copied before the next call + to :meth:`~get_buffer` or :meth:`~get_buffer_as_array`. 
+ + """ + import numpy # type: ignore + + # Read the next samples from the stream + buf = self.get_buffer() + + # Check if we've come to the end of the stream + if buf is None: + return None + + # Convert the bytes buffer to a NumPy array + array = numpy.frombuffer( + buf, + dtype=numpy.int16 + ) + + # Reshape the array + return array.reshape( + (len(buf) + // self.bytes_per_sample + // self.channels, + self.channels) + ) diff --git a/sbapp/pyogg/py.typed b/sbapp/pyogg/py.typed new file mode 100644 index 0000000..d4defd9 --- /dev/null +++ b/sbapp/pyogg/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. This package uses inline types. \ No newline at end of file diff --git a/sbapp/pyogg/pyogg_error.py b/sbapp/pyogg/pyogg_error.py new file mode 100644 index 0000000..35f28bf --- /dev/null +++ b/sbapp/pyogg/pyogg_error.py @@ -0,0 +1,2 @@ +class PyOggError(Exception): + pass diff --git a/sbapp/pyogg/vorbis.py b/sbapp/pyogg/vorbis.py new file mode 100644 index 0000000..a8432ba --- /dev/null +++ b/sbapp/pyogg/vorbis.py @@ -0,0 +1,855 @@ +############################################################ +# Vorbis license: # +############################################################ +""" +Copyright (c) 2002-2015 Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+""" + +import ctypes +import ctypes.util +from traceback import print_exc as _print_exc +import os + +OV_EXCLUDE_STATIC_CALLBACKS = False + +__MINGW32__ = False + +_WIN32 = False + +from .ogg import * + +from .library_loader import ExternalLibrary, ExternalLibraryError + +__here = os.getcwd() + +libvorbis = None + +try: + names = { + "Windows": "libvorbis.dll", + "Darwin": "libvorbis.0.dylib", + "external": "vorbis" + } + libvorbis = Library.load(names, tests = [lambda lib: hasattr(lib, "vorbis_info_init")]) +except ExternalLibraryError: + pass +except: + _print_exc() + +libvorbisfile = None + +try: + names = { + "Windows": "libvorbisfile.dll", + "Darwin": "libvorbisfile.3.dylib", + "external": "vorbisfile" + } + libvorbisfile = Library.load(names, tests = [lambda lib: hasattr(lib, "ov_clear")]) +except ExternalLibraryError: + pass +except: + _print_exc() + +libvorbisenc = None + +# In some cases, libvorbis may also have the libvorbisenc functionality. +libvorbis_is_also_libvorbisenc = True + +for f in ("vorbis_encode_ctl", + "vorbis_encode_init", + "vorbis_encode_init_vbr", + "vorbis_encode_setup_init", + "vorbis_encode_setup_managed", + "vorbis_encode_setup_vbr"): + if not hasattr(libvorbis, f): + libvorbis_is_also_libvorbisenc = False + break + +if libvorbis_is_also_libvorbisenc: + libvorbisenc = libvorbis +else: + try: + names = { + "Windows": "libvorbisenc.dll", + "Darwin": "libvorbisenc.2.dylib", + "external": "vorbisenc" + } + libvorbisenc = Library.load(names, tests = [lambda lib: hasattr(lib, "vorbis_encode_init")]) + except ExternalLibraryError: + pass + except: + _print_exc() + +if libvorbis is None: + PYOGG_VORBIS_AVAIL = False +else: + PYOGG_VORBIS_AVAIL = True + +if libvorbisfile is None: + PYOGG_VORBIS_FILE_AVAIL = False +else: + PYOGG_VORBIS_FILE_AVAIL = True + +if libvorbisenc is None: + PYOGG_VORBIS_ENC_AVAIL = False +else: + PYOGG_VORBIS_ENC_AVAIL = True + +# FIXME: What's the story with the lack of checking for PYOGG_VORBIS_ENC_AVAIL? +# We just seem to assume that it's available. 
+ +if PYOGG_OGG_AVAIL and PYOGG_VORBIS_AVAIL and PYOGG_VORBIS_FILE_AVAIL: + # Sanity check also satisfies mypy type checking + assert libogg is not None + assert libvorbis is not None + assert libvorbisfile is not None + + + # codecs + class vorbis_info(ctypes.Structure): + """ + Wrapper for: + typedef struct vorbis_info vorbis_info; + """ + _fields_ = [("version", c_int), + ("channels", c_int), + ("rate", c_long), + + ("bitrate_upper", c_long), + ("bitrate_nominal", c_long), + ("bitrate_lower", c_long), + ("bitrate_window", c_long), + ("codec_setup", c_void_p)] + + + + class vorbis_dsp_state(ctypes.Structure): + """ + Wrapper for: + typedef struct vorbis_dsp_state vorbis_dsp_state; + """ + _fields_ = [("analysisp", c_int), + ("vi", POINTER(vorbis_info)), + ("pcm", c_float_p_p), + ("pcmret", c_float_p_p), + ("pcm_storage", c_int), + ("pcm_current", c_int), + ("pcm_returned", c_int), + + ("preextrapolate", c_int), + ("eofflag", c_int), + + ("lW", c_long), + ("W", c_long), + ("nW", c_long), + ("centerW", c_long), + + ("granulepos", ogg_int64_t), + ("sequence", ogg_int64_t), + + ("glue_bits", ogg_int64_t), + ("time_bits", ogg_int64_t), + ("floor_bits", ogg_int64_t), + ("res_bits", ogg_int64_t), + + ("backend_state", c_void_p)] + + class alloc_chain(ctypes.Structure): + """ + Wrapper for: + typedef struct alloc_chain; + """ + pass + + alloc_chain._fields_ = [("ptr", c_void_p), + ("next", POINTER(alloc_chain))] + + class vorbis_block(ctypes.Structure): + """ + Wrapper for: + typedef struct vorbis_block vorbis_block; + """ + _fields_ = [("pcm", c_float_p_p), + ("opb", oggpack_buffer), + ("lW", c_long), + ("W", c_long), + ("nW", c_long), + ("pcmend", c_int), + ("mode", c_int), + + ("eofflag", c_int), + ("granulepos", ogg_int64_t), + ("sequence", ogg_int64_t), + ("vd", POINTER(vorbis_dsp_state)), + + ("localstore", c_void_p), + ("localtop", c_long), + ("localalloc", c_long), + ("totaluse", c_long), + ("reap", POINTER(alloc_chain)), + + ("glue_bits", c_long), + ("time_bits", c_long), + ("floor_bits", c_long), + ("res_bits", c_long), + + ("internal", c_void_p)] + + class vorbis_comment(ctypes.Structure): + """ + Wrapper for: + typedef struct vorbis_comment vorbis_comment; + """ + _fields_ = [("user_comments", c_char_p_p), + ("comment_lengths", c_int_p), + ("comments", c_int), + ("vendor", c_char_p)] + + + + vi_p = POINTER(vorbis_info) + vc_p = POINTER(vorbis_comment) + vd_p = POINTER(vorbis_dsp_state) + vb_p = POINTER(vorbis_block) + + libvorbis.vorbis_info_init.restype = None + libvorbis.vorbis_info_init.argtypes = [vi_p] + def vorbis_info_init(vi): + libvorbis.vorbis_info_init(vi) + + libvorbis.vorbis_info_clear.restype = None + libvorbis.vorbis_info_clear.argtypes = [vi_p] + def vorbis_info_clear(vi): + libvorbis.vorbis_info_clear(vi) + + libvorbis.vorbis_info_blocksize.restype = c_int + libvorbis.vorbis_info_blocksize.argtypes = [vi_p, c_int] + def vorbis_info_blocksize(vi, zo): + return libvorbis.vorbis_info_blocksize(vi, zo) + + libvorbis.vorbis_comment_init.restype = None + libvorbis.vorbis_comment_init.argtypes = [vc_p] + def vorbis_comment_init(vc): + libvorbis.vorbis_comment_init(vc) + + libvorbis.vorbis_comment_add.restype = None + libvorbis.vorbis_comment_add.argtypes = [vc_p, c_char_p] + def vorbis_comment_add(vc, comment): + libvorbis.vorbis_comment_add(vc, comment) + + libvorbis.vorbis_comment_add_tag.restype = None + libvorbis.vorbis_comment_add_tag.argtypes = [vc_p, c_char_p, c_char_p] + def vorbis_comment_add_tag(vc, tag, comment): + libvorbis.vorbis_comment_add_tag(vc, tag, 
comment) + + libvorbis.vorbis_comment_query.restype = c_char_p + libvorbis.vorbis_comment_query.argtypes = [vc_p, c_char_p, c_int] + def vorbis_comment_query(vc, tag, count): + libvorbis.vorbis_comment_query(vc, tag, count) + + libvorbis.vorbis_comment_query_count.restype = c_int + libvorbis.vorbis_comment_query_count.argtypes = [vc_p, c_char_p] + def vorbis_comment_query_count(vc, tag): + libvorbis.vorbis_comment_query_count(vc, tag) + + libvorbis.vorbis_comment_clear.restype = None + libvorbis.vorbis_comment_clear.argtypes = [vc_p] + def vorbis_comment_clear(vc): + libvorbis.vorbis_comment_clear(vc) + + + + libvorbis.vorbis_block_init.restype = c_int + libvorbis.vorbis_block_init.argtypes = [vd_p, vb_p] + def vorbis_block_init(v,vb): + return libvorbis.vorbis_block_init(v,vb) + + libvorbis.vorbis_block_clear.restype = c_int + libvorbis.vorbis_block_clear.argtypes = [vb_p] + def vorbis_block_clear(vb): + return libvorbis.vorbis_block_clear(vb) + + libvorbis.vorbis_dsp_clear.restype = None + libvorbis.vorbis_dsp_clear.argtypes = [vd_p] + def vorbis_dsp_clear(v): + return libvorbis.vorbis_dsp_clear(v) + + libvorbis.vorbis_granule_time.restype = c_double + libvorbis.vorbis_granule_time.argtypes = [vd_p, ogg_int64_t] + def vorbis_granule_time(v, granulepos): + return libvorbis.vorbis_granule_time(v, granulepos) + + + + libvorbis.vorbis_version_string.restype = c_char_p + libvorbis.vorbis_version_string.argtypes = [] + def vorbis_version_string(): + return libvorbis.vorbis_version_string() + + + + + + libvorbis.vorbis_analysis_init.restype = c_int + libvorbis.vorbis_analysis_init.argtypes = [vd_p, vi_p] + def vorbis_analysis_init(v, vi): + return libvorbis.vorbis_analysis_init(v, vi) + + libvorbis.vorbis_commentheader_out.restype = c_int + libvorbis.vorbis_commentheader_out.argtypes = [vc_p, op_p] + def vorbis_commentheader_out(vc, op): + return libvorbis.vorbis_commentheader_out(vc, op) + + libvorbis.vorbis_analysis_headerout.restype = c_int + libvorbis.vorbis_analysis_headerout.argtypes = [vd_p, vc_p, op_p, op_p, op_p] + def vorbis_analysis_headerout(v,vc, op, op_comm, op_code): + return libvorbis.vorbis_analysis_headerout(v,vc, op, op_comm, op_code) + + libvorbis.vorbis_analysis_buffer.restype = c_float_p_p + libvorbis.vorbis_analysis_buffer.argtypes = [vd_p, c_int] + def vorbis_analysis_buffer(v, vals): + return libvorbis.vorbis_analysis_buffer(v, vals) + + libvorbis.vorbis_analysis_wrote.restype = c_int + libvorbis.vorbis_analysis_wrote.argtypes = [vd_p, c_int] + def vorbis_analysis_wrote(v, vals): + return libvorbis.vorbis_analysis_wrote(v, vals) + + libvorbis.vorbis_analysis_blockout.restype = c_int + libvorbis.vorbis_analysis_blockout.argtypes = [vd_p, vb_p] + def vorbis_analysis_blockout(v, vb): + return libvorbis.vorbis_analysis_blockout(v, vb) + + libvorbis.vorbis_analysis.restype = c_int + libvorbis.vorbis_analysis.argtypes = [vb_p, op_p] + def vorbis_analysis(vb, op): + return libvorbis.vorbis_analysis(vb, op) + + + + + libvorbis.vorbis_bitrate_addblock.restype = c_int + libvorbis.vorbis_bitrate_addblock.argtypes = [vb_p] + def vorbis_bitrate_addblock(vb): + return libvorbis.vorbis_bitrate_addblock(vb) + + libvorbis.vorbis_bitrate_flushpacket.restype = c_int + libvorbis.vorbis_bitrate_flushpacket.argtypes = [vd_p, op_p] + def vorbis_bitrate_flushpacket(vd, op): + return libvorbis.vorbis_bitrate_flushpacket(vd, op) + + + + + libvorbis.vorbis_synthesis_idheader.restype = c_int + libvorbis.vorbis_synthesis_idheader.argtypes = [op_p] + def vorbis_synthesis_idheader(op): + return 
libvorbis.vorbis_synthesis_idheader(op) + + libvorbis.vorbis_synthesis_headerin.restype = c_int + libvorbis.vorbis_synthesis_headerin.argtypes = [vi_p, vc_p, op_p] + def vorbis_synthesis_headerin(vi, vc, op): + return libvorbis.vorbis_synthesis_headerin(vi, vc, op) + + + + + libvorbis.vorbis_synthesis_init.restype = c_int + libvorbis.vorbis_synthesis_init.argtypes = [vd_p, vi_p] + def vorbis_synthesis_init(v,vi): + return libvorbis.vorbis_synthesis_init(v,vi) + + libvorbis.vorbis_synthesis_restart.restype = c_int + libvorbis.vorbis_synthesis_restart.argtypes = [vd_p] + def vorbis_synthesis_restart(v): + return libvorbis.vorbis_synthesis_restart(v) + + libvorbis.vorbis_synthesis.restype = c_int + libvorbis.vorbis_synthesis.argtypes = [vb_p, op_p] + def vorbis_synthesis(vb, op): + return libvorbis.vorbis_synthesis(vb, op) + + libvorbis.vorbis_synthesis_trackonly.restype = c_int + libvorbis.vorbis_synthesis_trackonly.argtypes = [vb_p, op_p] + def vorbis_synthesis_trackonly(vb, op): + return libvorbis.vorbis_synthesis_trackonly(vb, op) + + libvorbis.vorbis_synthesis_blockin.restype = c_int + libvorbis.vorbis_synthesis_blockin.argtypes = [vd_p, vb_p] + def vorbis_synthesis_blockin(v, vb): + return libvorbis.vorbis_synthesis_blockin(v, vb) + + libvorbis.vorbis_synthesis_pcmout.restype = c_int + libvorbis.vorbis_synthesis_pcmout.argtypes = [vd_p, c_float_p_p_p] + def vorbis_synthesis_pcmout(v, pcm): + return libvorbis.vorbis_synthesis_pcmout(v, pcm) + + libvorbis.vorbis_synthesis_lapout.restype = c_int + libvorbis.vorbis_synthesis_lapout.argtypes = [vd_p, c_float_p_p_p] + def vorbis_synthesis_lapout(v, pcm): + return libvorbis.vorbis_synthesis_lapout(v, pcm) + + libvorbis.vorbis_synthesis_read.restype = c_int + libvorbis.vorbis_synthesis_read.argtypes = [vd_p, c_int] + def vorbis_synthesis_read(v, samples): + return libvorbis.vorbis_synthesis_read(v, samples) + + libvorbis.vorbis_packet_blocksize.restype = c_long + libvorbis.vorbis_packet_blocksize.argtypes = [vi_p, op_p] + def vorbis_packet_blocksize(vi, op): + return libvorbis.vorbis_packet_blocksize(vi, op) + + + + libvorbis.vorbis_synthesis_halfrate.restype = c_int + libvorbis.vorbis_synthesis_halfrate.argtypes = [vi_p, c_int] + def vorbis_synthesis_halfrate(v, flag): + return libvorbis.vorbis_synthesis_halfrate(v, flag) + + libvorbis.vorbis_synthesis_halfrate_p.restype = c_int + libvorbis.vorbis_synthesis_halfrate_p.argtypes = [vi_p] + def vorbis_synthesis_halfrate_p(vi): + return libvorbis.vorbis_synthesis_halfrate_p(vi) + + OV_FALSE = -1 + OV_EOF = -2 + OV_HOLE = -3 + + OV_EREAD = -128 + OV_EFAULT = -129 + OV_EIMPL =-130 + OV_EINVAL =-131 + OV_ENOTVORBIS =-132 + OV_EBADHEADER =-133 + OV_EVERSION =-134 + OV_ENOTAUDIO =-135 + OV_EBADPACKET =-136 + OV_EBADLINK =-137 + OV_ENOSEEK =-138 + # end of codecs + + # vorbisfile + read_func = ctypes.CFUNCTYPE(c_size_t, + c_void_p, + c_size_t, + c_size_t, + c_void_p) + + seek_func = ctypes.CFUNCTYPE(c_int, + c_void_p, + ogg_int64_t, + c_int) + + close_func = ctypes.CFUNCTYPE(c_int, + c_void_p) + + tell_func = ctypes.CFUNCTYPE(c_long, + c_void_p) + + class ov_callbacks(ctypes.Structure): + """ + Wrapper for: + typedef struct ov_callbacks; + """ + + _fields_ = [("read_func", read_func), + ("seek_func", seek_func), + ("close_func", close_func), + ("tell_func", tell_func)] + + NOTOPEN = 0 + PARTOPEN = 1 + OPENED = 2 + STREAMSET = 3 + INITSET = 4 + + class OggVorbis_File(ctypes.Structure): + """ + Wrapper for: + typedef struct OggVorbis_File OggVorbis_File; + """ + + _fields_ = [("datasource", c_void_p), + 
("seekable", c_int), + ("offset", ogg_int64_t), + ("end", ogg_int64_t), + ("oy", ogg_sync_state), + + ("links", c_int), + ("offsets", ogg_int64_t_p), + ("dataoffsets", ogg_int64_t_p), + ("serialnos", c_long_p), + ("pcmlengths", ogg_int64_t_p), + ("vi", vi_p), + ("vc", vc_p), + + ("pcm_offset", ogg_int64_t), + ("ready_state", c_int), + ("current_serialno", c_long), + ("current_link", c_int), + + ("bittrack", c_double), + ("samptrack", c_double), + + ("os", ogg_stream_state), + + ("vd", vorbis_dsp_state), + ("vb", vorbis_block), + + ("callbacks", ov_callbacks)] + vf_p = POINTER(OggVorbis_File) + + libvorbisfile.ov_clear.restype = c_int + libvorbisfile.ov_clear.argtypes = [vf_p] + + def ov_clear(vf): + return libvorbisfile.ov_clear(vf) + + libvorbisfile.ov_fopen.restype = c_int + libvorbisfile.ov_fopen.argtypes = [c_char_p, vf_p] + + def ov_fopen(path, vf): + return libvorbisfile.ov_fopen(to_char_p(path), vf) + + libvorbisfile.ov_open_callbacks.restype = c_int + libvorbisfile.ov_open_callbacks.argtypes = [c_void_p, vf_p, c_char_p, c_long, ov_callbacks] + + def ov_open_callbacks(datasource, vf, initial, ibytes, callbacks): + return libvorbisfile.ov_open_callbacks(datasource, vf, initial, ibytes, callbacks) + + def ov_open(*args, **kw): + raise PyOggError("ov_open is not supported, please use ov_fopen instead") + + def ov_test(*args, **kw): + raise PyOggError("ov_test is not supported") + + libvorbisfile.ov_test_callbacks.restype = c_int + libvorbisfile.ov_test_callbacks.argtypes = [c_void_p, vf_p, c_char_p, c_long, ov_callbacks] + + def ov_test_callbacks(datasource, vf, initial, ibytes, callbacks): + return libvorbisfile.ov_test_callbacks(datasource, vf, initial, ibytes, callbacks) + + libvorbisfile.ov_test_open.restype = c_int + libvorbisfile.ov_test_open.argtypes = [vf_p] + + def ov_test_open(vf): + return libvorbisfile.ov_test_open(vf) + + + + + libvorbisfile.ov_bitrate.restype = c_long + libvorbisfile.ov_bitrate.argtypes = [vf_p, c_int] + + def ov_bitrate(vf, i): + return libvorbisfile.ov_bitrate(vf, i) + + libvorbisfile.ov_bitrate_instant.restype = c_long + libvorbisfile.ov_bitrate_instant.argtypes = [vf_p] + + def ov_bitrate_instant(vf): + return libvorbisfile.ov_bitrate_instant(vf) + + libvorbisfile.ov_streams.restype = c_long + libvorbisfile.ov_streams.argtypes = [vf_p] + + def ov_streams(vf): + return libvorbisfile.ov_streams(vf) + + libvorbisfile.ov_seekable.restype = c_long + libvorbisfile.ov_seekable.argtypes = [vf_p] + + def ov_seekable(vf): + return libvorbisfile.ov_seekable(vf) + + libvorbisfile.ov_serialnumber.restype = c_long + libvorbisfile.ov_serialnumber.argtypes = [vf_p, c_int] + + def ov_serialnumber(vf, i): + return libvorbisfile.ov_serialnumber(vf, i) + + + + libvorbisfile.ov_raw_total.restype = ogg_int64_t + libvorbisfile.ov_raw_total.argtypes = [vf_p, c_int] + + def ov_raw_total(vf, i): + return libvorbisfile.ov_raw_total(vf, i) + + libvorbisfile.ov_pcm_total.restype = ogg_int64_t + libvorbisfile.ov_pcm_total.argtypes = [vf_p, c_int] + + def ov_pcm_total(vf, i): + return libvorbisfile.ov_pcm_total(vf, i) + + libvorbisfile.ov_time_total.restype = c_double + libvorbisfile.ov_time_total.argtypes = [vf_p, c_int] + + def ov_time_total(vf, i): + return libvorbisfile.ov_time_total(vf, i) + + + + + libvorbisfile.ov_raw_seek.restype = c_int + libvorbisfile.ov_raw_seek.argtypes = [vf_p, ogg_int64_t] + + def ov_raw_seek(vf, pos): + return libvorbisfile.ov_raw_seek(vf, pos) + + libvorbisfile.ov_pcm_seek.restype = c_int + libvorbisfile.ov_pcm_seek.argtypes = [vf_p, ogg_int64_t] + 
+ def ov_pcm_seek(vf, pos): + return libvorbisfile.ov_pcm_seek(vf, pos) + + libvorbisfile.ov_pcm_seek_page.restype = c_int + libvorbisfile.ov_pcm_seek_page.argtypes = [vf_p, ogg_int64_t] + + def ov_pcm_seek_page(vf, pos): + return libvorbisfile.ov_pcm_seek_page(vf, pos) + + libvorbisfile.ov_time_seek.restype = c_int + libvorbisfile.ov_time_seek.argtypes = [vf_p, c_double] + + def ov_time_seek(vf, pos): + return libvorbisfile.ov_time_seek(vf, pos) + + libvorbisfile.ov_time_seek_page.restype = c_int + libvorbisfile.ov_time_seek_page.argtypes = [vf_p, c_double] + + def ov_time_seek_page(vf, pos): + return libvorbisfile.ov_time_seek_page(vf, pos) + + + + + libvorbisfile.ov_raw_seek_lap.restype = c_int + libvorbisfile.ov_raw_seek_lap.argtypes = [vf_p, ogg_int64_t] + + def ov_raw_seek_lap(vf, pos): + return libvorbisfile.ov_raw_seek_lap(vf, pos) + + libvorbisfile.ov_pcm_seek_lap.restype = c_int + libvorbisfile.ov_pcm_seek_lap.argtypes = [vf_p, ogg_int64_t] + + def ov_pcm_seek_lap(vf, pos): + return libvorbisfile.ov_pcm_seek_lap(vf, pos) + + libvorbisfile.ov_pcm_seek_page_lap.restype = c_int + libvorbisfile.ov_pcm_seek_page_lap.argtypes = [vf_p, ogg_int64_t] + + def ov_pcm_seek_page_lap(vf, pos): + return libvorbisfile.ov_pcm_seek_page_lap(vf, pos) + + libvorbisfile.ov_time_seek_lap.restype = c_int + libvorbisfile.ov_time_seek_lap.argtypes = [vf_p, c_double] + + def ov_time_seek_lap(vf, pos): + return libvorbisfile.ov_time_seek_lap(vf, pos) + + libvorbisfile.ov_time_seek_page_lap.restype = c_int + libvorbisfile.ov_time_seek_page_lap.argtypes = [vf_p, c_double] + + def ov_time_seek_page_lap(vf, pos): + return libvorbisfile.ov_time_seek_page_lap(vf, pos) + + + + libvorbisfile.ov_raw_tell.restype = ogg_int64_t + libvorbisfile.ov_raw_tell.argtypes = [vf_p] + + def ov_raw_tell(vf): + return libvorbisfile.ov_raw_tell(vf) + + libvorbisfile.ov_pcm_tell.restype = ogg_int64_t + libvorbisfile.ov_pcm_tell.argtypes = [vf_p] + + def ov_pcm_tell(vf): + return libvorbisfile.ov_pcm_tell(vf) + + libvorbisfile.ov_time_tell.restype = c_double + libvorbisfile.ov_time_tell.argtypes = [vf_p] + + def ov_time_tell(vf): + return libvorbisfile.ov_time_tell(vf) + + + + libvorbisfile.ov_info.restype = vi_p + libvorbisfile.ov_info.argtypes = [vf_p, c_int] + + def ov_info(vf, link): + return libvorbisfile.ov_info(vf, link) + + libvorbisfile.ov_comment.restype = vc_p + libvorbisfile.ov_comment.argtypes = [vf_p, c_int] + + def ov_comment(vf, link): + return libvorbisfile.ov_comment(vf, link) + + + + libvorbisfile.ov_read_float.restype = c_long + libvorbisfile.ov_read_float.argtypes = [vf_p, c_float_p_p_p, c_int, c_int_p] + + def ov_read_float(vf, pcm_channels, samples, bitstream): + return libvorbisfile.ov_read_float(vf, pcm_channels, samples, bitstream) + + filter_ = ctypes.CFUNCTYPE(None, + c_float_p_p, + c_long, + c_long, + c_void_p) + + try: + libvorbisfile.ov_read_filter.restype = c_long + libvorbisfile.ov_read_filter.argtypes = [vf_p, c_char_p, c_int, c_int, c_int, c_int, c_int_p, filter_, c_void_p] + + def ov_read_filter(vf, buffer, length, bigendianp, word, sgned, bitstream, filter_, filter_param): + return libvorbisfile.ov_read_filter(vf, buffer, length, bigendianp, word, sgned, bitstream, filter_, filter_param) + except: + pass + + libvorbisfile.ov_read.restype = c_long + libvorbisfile.ov_read.argtypes = [vf_p, c_char_p, c_int, c_int, c_int, c_int, c_int_p] + + def ov_read(vf, buffer, length, bigendianp, word, sgned, bitstream): + return libvorbisfile.ov_read(vf, buffer, length, bigendianp, word, sgned, bitstream) + + 
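To make the calling convention of these wrappers concrete, here is a rough sketch of a decode call built on `ov_fopen`, `ov_read` and `ov_clear`; the higher-level `VorbisFile` class added later in this patch follows essentially the same pattern. It assumes the libraries loaded successfully and uses the `sbapp.pyogg` module layout from this patch; `read_first_pcm` is an illustrative helper, not part of the patch.

```python
# Illustrative only: decode the first chunk of PCM from an Ogg Vorbis file
# using the ctypes wrappers bound above (assumes the libraries were found).
import ctypes

from sbapp.pyogg import vorbis
from sbapp.pyogg.pyogg_error import PyOggError

def read_first_pcm(path: str, chunk_bytes: int = 4096) -> bytes:
    vf = vorbis.OggVorbis_File()
    if vorbis.ov_fopen(path, ctypes.byref(vf)) != 0:
        raise PyOggError("Could not open '{}'".format(path))

    buf = (ctypes.c_char * chunk_bytes)()
    bitstream = ctypes.c_int()

    # 0 = little endian, 2 = 16-bit words, 1 = signed samples
    n = vorbis.ov_read(
        ctypes.byref(vf),
        ctypes.cast(buf, ctypes.c_char_p),
        chunk_bytes,
        0, 2, 1,
        ctypes.byref(bitstream)
    )
    vorbis.ov_clear(ctypes.byref(vf))

    if n < 0:
        raise PyOggError("ov_read failed with error code {}".format(n))
    return buf.raw[:n]  # n is the number of bytes decoded; 0 means end of stream
```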
libvorbisfile.ov_crosslap.restype = c_int + libvorbisfile.ov_crosslap.argtypes = [vf_p, vf_p] + + def ov_crosslap(vf1, cf2): + return libvorbisfile.ov_crosslap(vf1, vf2) + + + + + libvorbisfile.ov_halfrate.restype = c_int + libvorbisfile.ov_halfrate.argtypes = [vf_p, c_int] + + def ov_halfrate(vf, flag): + return libvorbisfile.ov_halfrate(vf, flag) + + libvorbisfile.ov_halfrate_p.restype = c_int + libvorbisfile.ov_halfrate_p.argtypes = [vf_p] + + def ov_halfrate_p(vf): + return libvorbisfile.ov_halfrate_p(vf) + # end of vorbisfile + + try: + # vorbisenc + + # Sanity check also satisfies mypy type checking + assert libvorbisenc is not None + + libvorbisenc.vorbis_encode_init.restype = c_int + libvorbisenc.vorbis_encode_init.argtypes = [vi_p, c_long, c_long, c_long, c_long, c_long] + + def vorbis_encode_init(vi, channels, rate, max_bitrate, nominal_bitrate, min_bitrate): + return libvorbisenc.vorbis_encode_init(vi, channels, rate, max_bitrate, nominal_bitrate, min_bitrate) + + libvorbisenc.vorbis_encode_setup_managed.restype = c_int + libvorbisenc.vorbis_encode_setup_managed.argtypes = [vi_p, c_long, c_long, c_long, c_long, c_long] + + def vorbis_encode_setup_managed(vi, channels, rate, max_bitrate, nominal_bitrate, min_bitrate): + return libvorbisenc.vorbis_encode_setup_managed(vi, channels, rate, max_bitrate, nominal_bitrate, min_bitrate) + + libvorbisenc.vorbis_encode_setup_vbr.restype = c_int + libvorbisenc.vorbis_encode_setup_vbr.argtypes = [vi_p, c_long, c_long, c_float] + + def vorbis_encode_setup_vbr(vi, channels, rate, quality): + return libvorbisenc.vorbis_encode_setup_vbr(vi, channels, rate, quality) + + libvorbisenc.vorbis_encode_init_vbr.restype = c_int + libvorbisenc.vorbis_encode_init_vbr.argtypes = [vi_p, c_long, c_long, c_float] + + def vorbis_encode_init_vbr(vi, channels, rate, quality): + return libvorbisenc.vorbis_encode_init_vbr(vi, channels, rate, quality) + + libvorbisenc.vorbis_encode_setup_init.restype = c_int + libvorbisenc.vorbis_encode_setup_init.argtypes = [vi_p] + + def vorbis_encode_setup_init(vi): + return libvorbisenc.vorbis_encode_setup_init(vi) + + libvorbisenc.vorbis_encode_ctl.restype = c_int + libvorbisenc.vorbis_encode_ctl.argtypes = [vi_p, c_int, c_void_p] + + def vorbis_encode_ctl(vi, number, arg): + return libvorbisenc.vorbis_encode_ctl(vi, number, arg) + + class ovectl_ratemanage_arg(ctypes.Structure): + _fields_ = [("management_active", c_int), + ("bitrate_hard_min", c_long), + ("bitrate_hard_max", c_long), + ("bitrate_hard_window", c_double), + ("bitrate_av_lo", c_long), + ("bitrate_av_hi", c_long), + ("bitrate_av_window", c_double), + ("bitrate_av_window_center", c_double)] + + class ovectl_ratemanage2_arg(ctypes.Structure): + _fields_ = [("management_active", c_int), + ("bitrate_limit_min_kbps", c_long), + ("bitrate_limit_max_kbps", c_long), + ("bitrate_limit_reservoir_bits", c_long), + ("bitrate_limit_reservoir_bias", c_double), + ("bitrate_average_kbps", c_long), + ("bitrate_average_damping", c_double)] + + OV_ECTL_RATEMANAGE2_GET =0x14 + + OV_ECTL_RATEMANAGE2_SET =0x15 + + OV_ECTL_LOWPASS_GET =0x20 + + OV_ECTL_LOWPASS_SET =0x21 + + OV_ECTL_IBLOCK_GET =0x30 + + OV_ECTL_IBLOCK_SET =0x31 + + OV_ECTL_COUPLING_GET =0x40 + + OV_ECTL_COUPLING_SET =0x41 + + OV_ECTL_RATEMANAGE_GET =0x10 + + OV_ECTL_RATEMANAGE_SET =0x11 + + OV_ECTL_RATEMANAGE_AVG =0x12 + + OV_ECTL_RATEMANAGE_HARD =0x13 + # end of vorbisenc + except: + pass diff --git a/sbapp/pyogg/vorbis_file.py b/sbapp/pyogg/vorbis_file.py new file mode 100644 index 0000000..918f1e8 --- /dev/null +++ 
b/sbapp/pyogg/vorbis_file.py @@ -0,0 +1,161 @@ +import ctypes + +from . import vorbis +from .audio_file import AudioFile +from .pyogg_error import PyOggError + +# TODO: Issue #70: Vorbis files with multiple logical bitstreams could +# be supported by chaining VorbisFile instances (with say a 'next' +# attribute that points to the next VorbisFile that would contain the +# PCM for the next logical bitstream). A considerable constraint to +# implementing this was that examples files that demonstrated multiple +# logical bitstreams couldn't be found or created. Note that even +# Audacity doesn't handle multiple logical bitstreams (see +# https://wiki.audacityteam.org/wiki/OGG#Importing_multiple_stream_files). + +# TODO: Issue #53: Unicode file names are not well supported. +# They may work in macOS and Linux, they don't work under Windows. + +class VorbisFile(AudioFile): + def __init__(self, + path: str, + bytes_per_sample: int = 2, + signed:bool = True) -> None: + """Load an OggVorbis File. + + path specifies the location of the Vorbis file. Unicode + filenames may not work correctly under Windows. + + bytes_per_sample specifies the word size of the PCM. It may + be either 1 or 2. Specifying one byte per sample will save + memory but will likely decrease the quality of the decoded + audio. + + Only Vorbis files with a single logical bitstream are + supported. + + """ + # Sanity check the number of bytes per sample + assert bytes_per_sample==1 or bytes_per_sample==2 + + # Sanity check that the vorbis library is available (for mypy) + assert vorbis.libvorbisfile is not None + + #: Bytes per sample + self.bytes_per_sample = bytes_per_sample + + #: Samples are signed (rather than unsigned) + self.signed = signed + + # Create a Vorbis File structure + vf = vorbis.OggVorbis_File() + + # Attempt to open the Vorbis file + error = vorbis.libvorbisfile.ov_fopen( + vorbis.to_char_p(path), + ctypes.byref(vf) + ) + + # Check for errors during opening + if error != 0: + raise PyOggError( + ("File '{}' couldn't be opened or doesn't exist. "+ + "Error code : {}").format(path, error) + ) + + # Extract info from the Vorbis file + info = vorbis.libvorbisfile.ov_info( + ctypes.byref(vf), + -1 # the current logical bitstream + ) + + #: Number of channels in audio file. + self.channels = info.contents.channels + + #: Number of samples per second (per channel), 44100 for + # example. + self.frequency = info.contents.rate + + # Extract the total number of PCM samples for the first + # logical bitstream + pcm_length_samples = vorbis.libvorbisfile.ov_pcm_total( + ctypes.byref(vf), + 0 # to extract the length of the first logical bitstream + ) + + # Create a memory block to store the entire PCM + Buffer = ( + ctypes.c_char + * ( + pcm_length_samples + * self.bytes_per_sample + * self.channels + ) + ) + self.buffer = Buffer() + + # Create a pointer to the newly allocated memory. It + # seems we can only do pointer arithmetic on void + # pointers. 
See + # https://mattgwwalker.wordpress.com/2020/05/30/pointer-manipulation-in-python/ + buf_ptr = ctypes.cast( + ctypes.pointer(self.buffer), + ctypes.c_void_p + ) + + # Storage for the index of the logical bitstream + bitstream_previous = None + bitstream = ctypes.c_int() + + # Set bytes remaining to read into PCM + read_size = len(self.buffer) + + while True: + # Convert buffer pointer to the desired type + ptr = ctypes.cast( + buf_ptr, + ctypes.POINTER(ctypes.c_char) + ) + + # Attempt to decode PCM from the Vorbis file + result = vorbis.libvorbisfile.ov_read( + ctypes.byref(vf), + ptr, + read_size, + 0, # Little endian + self.bytes_per_sample, + int(self.signed), + ctypes.byref(bitstream) + ) + + # Check for errors + if result < 0: + raise PyOggError( + "An error occurred decoding the Vorbis file: "+ + f"Error code: {result}" + ) + + # Check that the bitstream hasn't changed as we only + # support Vorbis files with a single logical bitstream. + if bitstream_previous is None: + bitstream_previous = bitstream + else: + if bitstream_previous != bitstream: + raise PyOggError( + "PyOgg currently supports Vorbis files "+ + "with only one logical stream" + ) + + # Check for end of file + if result == 0: + break + + # Calculate the number of bytes remaining to read into PCM + read_size -= result + + # Update the pointer into the buffer + buf_ptr.value += result + + + # Close the file and clean up memory + vorbis.libvorbisfile.ov_clear(ctypes.byref(vf)) diff --git a/sbapp/pyogg/vorbis_file_stream.py b/sbapp/pyogg/vorbis_file_stream.py new file mode 100644 index 0000000..57677ba --- /dev/null +++ b/sbapp/pyogg/vorbis_file_stream.py @@ -0,0 +1,110 @@ +import ctypes + +from . import vorbis +from .pyogg_error import PyOggError + +class VorbisFileStream: + def __init__(self, path, buffer_size=8192): + self.exists = False + self._buffer_size = buffer_size + + self.vf = vorbis.OggVorbis_File() + error = vorbis.ov_fopen(path, ctypes.byref(self.vf)) + if error != 0: + raise PyOggError("file couldn't be opened or doesn't exist. Error code : {}".format(error)) + + info = vorbis.ov_info(ctypes.byref(self.vf), -1) + + #: Number of channels in audio file. + self.channels = info.contents.channels + + #: Number of samples per second (per channel). Always + # 48,000. + self.frequency = info.contents.rate + + array = (ctypes.c_char*(self._buffer_size*self.channels))() + + self.buffer_ = ctypes.cast(ctypes.pointer(array), ctypes.c_char_p) + + self.bitstream = ctypes.c_int() + self.bitstream_pointer = ctypes.pointer(self.bitstream) + + self.exists = True # TODO: is this the best place for this statement? + + #: Bytes per sample + self.bytes_per_sample = 2 # TODO: Where is this defined? + + def __del__(self): + if self.exists: + vorbis.ov_clear(ctypes.byref(self.vf)) + self.exists = False + + def clean_up(self): + vorbis.ov_clear(ctypes.byref(self.vf)) + + self.exists = False + + def get_buffer(self): + """get_buffer() -> bytesBuffer, bufferLength + + Returns None when all data has been read from the file. 
+ + """ + if not self.exists: + return None + buffer = [] + total_bytes_written = 0 + + while True: + new_bytes = vorbis.ov_read(ctypes.byref(self.vf), self.buffer_, self._buffer_size*self.channels - total_bytes_written, 0, 2, 1, self.bitstream_pointer) + + array_ = ctypes.cast(self.buffer_, ctypes.POINTER(ctypes.c_char*(self._buffer_size*self.channels))).contents + + buffer.append(array_.raw[:new_bytes]) + + total_bytes_written += new_bytes + + if new_bytes == 0 or total_bytes_written >= self._buffer_size*self.channels: + break + + out_buffer = b"".join(buffer) + + if total_bytes_written == 0: + self.clean_up() + return(None) + + return out_buffer + + def get_buffer_as_array(self): + """Provides the buffer as a NumPy array. + + Note that the underlying data type is 16-bit signed + integers. + + Does not copy the underlying data, so the returned array + should either be processed or copied before the next call + to get_buffer() or get_buffer_as_array(). + + """ + import numpy # type: ignore + + # Read the next samples from the stream + buf = self.get_buffer() + + # Check if we've come to the end of the stream + if buf is None: + return None + + # Convert the bytes buffer to a NumPy array + array = numpy.frombuffer( + buf, + dtype=numpy.int16 + ) + + # Reshape the array + return array.reshape( + (len(buf) + // self.bytes_per_sample + // self.channels, + self.channels) + ) diff --git a/sbapp/services/sidebandservice.py b/sbapp/services/sidebandservice.py index 91c7cf0..8f1ef31 100644 --- a/sbapp/services/sidebandservice.py +++ b/sbapp/services/sidebandservice.py @@ -58,6 +58,8 @@ class SidebandService(): 0x1a86: [0x5523, 0x7523, 0x55D4], # Qinheng 0x0483: [0x5740], # ST CDC 0x2E8A: [0x0005, 0x000A], # Raspberry Pi Pico + 0x239A: [0x8029], # Adafruit (RAK4631) + 0x303A: [0x1001], # ESP-32S3 } def android_notification(self, title="", content="", ticker="", group=None, context_id=None): diff --git a/sbapp/sideband/audioproc.py b/sbapp/sideband/audioproc.py new file mode 100644 index 0000000..fcca182 --- /dev/null +++ b/sbapp/sideband/audioproc.py @@ -0,0 +1,193 @@ +import os +import io +import sh +import math +import time +import struct +import numpy as np +import RNS +import LXMF + +if RNS.vendor.platformutils.is_android(): + from pyogg import OpusFile, OpusBufferedEncoder, OggOpusWriter + from pydub import AudioSegment +else: + if RNS.vendor.platformutils.is_linux(): + from sbapp.pyogg import OpusFile, OpusBufferedEncoder, OggOpusWriter + else: + from pyogg import OpusFile, OpusBufferedEncoder, OggOpusWriter + + from sbapp.pydub import AudioSegment + +codec2_modes = { + # LXMF.AM_CODEC2_450PWB: ???, # No bindings + # LXMF.AM_CODEC2_450: ???, # No bindings + LXMF.AM_CODEC2_700C: 700, + LXMF.AM_CODEC2_1200: 1200, + LXMF.AM_CODEC2_1300: 1300, + LXMF.AM_CODEC2_1400: 1400, + LXMF.AM_CODEC2_1600: 1600, + LXMF.AM_CODEC2_2400: 2400, + LXMF.AM_CODEC2_3200: 3200, +} + +def samples_from_ogg(file_path=None, output_rate=8000): + if file_path != None and os.path.isfile(file_path): + opus_file = OpusFile(file_path) + audio = AudioSegment( + bytes(opus_file.as_array()), + frame_rate=opus_file.frequency, + sample_width=opus_file.bytes_per_sample, + channels=opus_file.channels) + + audio = audio.split_to_mono()[0] + audio = audio.apply_gain(-audio.max_dBFS) + audio = audio.set_frame_rate(output_rate) + audio = audio.set_sample_width(2) + + return audio.get_array_of_samples() + +def resample(samples, width, channels, input_rate, output_rate, normalize): + audio = AudioSegment( + samples, + 
frame_rate=input_rate, + sample_width=width, + channels=channels) + + if normalize: + audio = audio.apply_gain(-audio.max_dBFS) + + resampled_audio = audio.set_frame_rate(output_rate) + return resampled_audio.get_array_of_samples().tobytes() + +def samples_to_ogg(samples=None, file_path=None, normalize=False, input_channels=1, input_sample_width=2, input_rate=8000, output_rate=12000, profile="audio"): + try: + if file_path != None and samples != None: + if input_rate != output_rate or normalize: + samples = resample(samples, input_sample_width, input_channels, input_rate, output_rate, normalize) + + pcm_data = io.BytesIO(samples) + channels = input_channels; samples_per_second = output_rate; bytes_per_sample = 2 + frame_duration_ms = 60 + + opus_buffered_encoder = OpusBufferedEncoder() + opus_buffered_encoder.set_application(profile) + opus_buffered_encoder.set_sampling_frequency(samples_per_second) + opus_buffered_encoder.set_channels(channels) + opus_buffered_encoder.set_frame_size(frame_duration_ms) + ogg_opus_writer = OggOpusWriter(file_path, opus_buffered_encoder) + + frame_duration = frame_duration_ms/1000.0 + frame_size = int(frame_duration * samples_per_second) + bytes_per_frame = frame_size*bytes_per_sample + + ogg_opus_writer.write(memoryview(bytearray(samples))) + ogg_opus_writer.close() + + return True + + except Exception as e: + RNS.trace_exception(e) + return False + +def samples_to_wav(samples=None, file_path=None): + if samples != None and file_path != None: + import wave + with wave.open(file_path, "wb") as wf: + wf.setnchannels(1) + wf.setsampwidth(2) + wf.setframerate(8000) + wf.writeframes(samples) + return True + +def voice_processing(input_path): + try: + ffmpeg = None + ffmpeg = sh.ffmpeg + if ffmpeg: + filters = "highpass=f=250, lowpass=f=3000,speechnorm=e=12.5:r=0.0001:l=1" + output_bitrate = "12k" + opus_apptype = "audio" + output_path = input_path.replace(".ogg","")+".p.ogg" + args = [ + "-i", input_path, "-filter:a", filters, + "-c:a", "libopus", "-application", opus_apptype, + "-vbr", "on","-b:a", output_bitrate, output_path] + try: + try: + os.unlink(output_path) + except: + pass + ffmpeg(*args) + return output_path + except Exception as e: + RNS.log("Could not process audio with ffmpeg", RNS.LOG_ERROR) + RNS.trace_exception(e) + return None + + except Exception as e: + return None + +def detect_codec2(): + try: + import pycodec2 + return True + except Exception as e: + RNS.log("Could not import codec2 module, libcodec2 is probably not installed or available", RNS.LOG_ERROR) + RNS.trace_exception(e) + + return False + +# Samples must be 8KHz, 16-bit, 1 channel +def encode_codec2(samples, mode): + ap_start = time.time() + import pycodec2 + if not mode in codec2_modes: + return None + + c2 = pycodec2.Codec2(codec2_modes[mode]) + SPF = c2.samples_per_frame() + PACKET_SIZE = SPF * 2 # 16-bit samples + STRUCT_FORMAT = '{}h'.format(SPF) + F_FRAMES = len(samples)/SPF + N_FRAMES = math.floor(len(samples)/SPF) + # TODO: Add padding to align to whole frames + frames = np.array(samples[0:N_FRAMES*SPF], dtype=np.int16) + + encoded = b"" + for pi in range(0, N_FRAMES): + pstart = pi*SPF + pend = (pi+1)*SPF + frame = frames[pstart:pend] + encoded_packet = c2.encode(frame) + encoded += encoded_packet + + ap_duration = time.time() - ap_start + RNS.log("Codec2 encoding complete in "+RNS.prettytime(ap_duration)+", bytes out: "+str(len(encoded)), RNS.LOG_DEBUG) + + return encoded + +def decode_codec2(encoded_bytes, mode): + ap_start = time.time() + import pycodec2 + if not mode 
in codec2_modes: + return None + + c2 = pycodec2.Codec2(codec2_modes[mode]) + SPF = c2.samples_per_frame() + BPF = c2.bytes_per_frame() + STRUCT_FORMAT = '{}h'.format(SPF) + N_FRAMES = math.floor(len(encoded_bytes)/BPF) + + decoded = b"" + for pi in range(0, N_FRAMES): + pstart = pi*BPF + pend = (pi+1)*BPF + encoded_packet = encoded_bytes[pstart:pend] + decoded_frame = c2.decode(encoded_packet) + decoded += struct.pack(STRUCT_FORMAT, *decoded_frame) + + ap_duration = time.time() - ap_start + RNS.log("Codec2 decoding complete in "+RNS.prettytime(ap_duration)+", samples out: "+str(len(decoded)), RNS.LOG_DEBUG) + + return decoded \ No newline at end of file diff --git a/sbapp/sideband/core.py b/sbapp/sideband/core.py index 6110f05..e4d144f 100644 --- a/sbapp/sideband/core.py +++ b/sbapp/sideband/core.py @@ -1,7 +1,6 @@ import RNS import LXMF import threading -import plyer import os.path import time import struct @@ -14,12 +13,14 @@ import RNS.Interfaces.Interface as Interface import multiprocessing.connection +from copy import deepcopy from threading import Lock from .res import sideband_fb_data from .sense import Telemeter, Commands from .plugins import SidebandCommandPlugin, SidebandServicePlugin, SidebandTelemetryPlugin if RNS.vendor.platformutils.get_platform() == "android": + import plyer from jnius import autoclass, cast # Squelch excessive method signature logging import jnius.reflect @@ -33,7 +34,8 @@ if RNS.vendor.platformutils.get_platform() == "android": jnius.reflect.log_method = mod jnius.reflect.log = redirect_log() ############################################ - +else: + import sbapp.plyer as plyer class PropagationNodeDetector(): EMITTED_DELTA_GRACE = 300 @@ -104,13 +106,26 @@ class SidebandCore(): def received_announce(self, destination_hash, announced_identity, app_data): # Add the announce to the directory announce # stream logger + + # This reformats the new v0.5.0 announce data back to the expected format + # for Sidebands database and other handling functions. 
+ dn = LXMF.display_name_from_app_data(app_data) + app_data = b"" + if dn != None: + app_data = dn.encode("utf-8") + self.log_announce(destination_hash, app_data, dest_type=SidebandCore.aspect_filter) - def __init__(self, owner_app, config_path = None, is_service=False, is_client=False, android_app_dir=None, verbose=False, owner_service=None, service_context=None, is_daemon=False): + def __init__(self, owner_app, config_path = None, is_service=False, is_client=False, android_app_dir=None, verbose=False, owner_service=None, service_context=None, is_daemon=False, load_config_only=False): self.is_service = is_service self.is_client = is_client self.is_daemon = is_daemon + self.msg_audio = None + self.last_msg_audio = None + self.ptt_playback_lock = threading.Lock() + self.ui_recording = False self.db = None + self.db_lock = threading.Lock() if not self.is_service and not self.is_client: self.is_standalone = True @@ -132,12 +147,14 @@ class SidebandCore(): self.telemetry_send_blocked_until = 0 self.pending_telemetry_request = False self.telemetry_request_max_history = 7*24*60*60 + self.default_lxm_limit = 128*1000 self.state_db = {} self.state_lock = Lock() self.rpc_connection = None self.service_stopped = False self.service_context = service_context self.owner_service = owner_service + self.version_str = "" if config_path == None: self.app_dir = plyer.storagepath.get_home_dir()+"/.config/sideband" @@ -170,6 +187,10 @@ class SidebandCore(): if not os.path.isdir(self.map_cache): os.makedirs(self.map_cache) + self.rec_cache = self.cache_dir+"/rec" + if not os.path.isdir(self.rec_cache): + os.makedirs(self.rec_cache) + self.icon = self.asset_dir+"/icon.png" self.icon_48 = self.asset_dir+"/icon_48.png" self.icon_32 = self.asset_dir+"/icon_32.png" @@ -195,7 +216,7 @@ class SidebandCore(): self.last_lxmf_announce = 0 self.last_if_change_announce = 0 self.interface_local_adding = False - self.next_auto_announce = time.time() + 60*(random.random()*(SidebandCore.AUTO_ANNOUNCE_RANDOM_MAX-SidebandCore.AUTO_ANNOUNCE_RANDOM_MIN)) + self.next_auto_announce = time.time() + 60*(random.random()*(SidebandCore.AUTO_ANNOUNCE_RANDOM_MAX-SidebandCore.AUTO_ANNOUNCE_RANDOM_MIN)+SidebandCore.AUTO_ANNOUNCE_RANDOM_MIN) try: if not os.path.isfile(self.config_path): @@ -223,6 +244,9 @@ class SidebandCore(): except Exception as e: RNS.log("Error while configuring Sideband: "+str(e), RNS.LOG_ERROR) + if load_config_only: + return + # Initialise Reticulum configuration if RNS.vendor.platformutils.get_platform() == "android": try: @@ -265,6 +289,58 @@ class SidebandCore(): threading.Thread(target=load_job, daemon=True).start() + if RNS.vendor.platformutils.is_linux(): + try: + if not self.is_daemon: + lde_level = RNS.LOG_DEBUG + RNS.log("Checking desktop integration...", lde_level) + local_share_dir = os.path.expanduser("~/.local/share") + app_entry_dir = os.path.expanduser("~/.local/share/applications") + icon_dir = os.path.expanduser("~/.local/share/icons/hicolor/512x512/apps") + de_filename = "io.unsigned.sideband.desktop" + de_source = self.asset_dir+"/"+de_filename + de_target = app_entry_dir+"/"+de_filename + icn_source = self.asset_dir+"/icon.png" + icn_target = icon_dir+"/io.unsigned.sideband.png" + if os.path.isdir(local_share_dir): + if not os.path.exists(app_entry_dir): + os.makedirs(app_entry_dir) + + update_de = False + if not os.path.exists(de_target): + update_de = True + else: + included_de_version = "" + with open(de_source, "rb") as sde_file: + included_de_version = sde_file.readline() + existing_de_version 
= None + with open(de_target, "rb") as de_file: + existing_de_version = de_file.readline() + + if included_de_version != existing_de_version: + update_de = True + RNS.log("Existing desktop entry doesn't match included, updating it", lde_level) + else: + update_de = False + RNS.log("Existing desktop entry matches included, not updating it", lde_level) + + if update_de: + RNS.log("Setting up desktop integration...", lde_level) + import shutil + RNS.log("Installing menu entry to \""+str(de_target)+"\"...", lde_level) + shutil.copy(de_source, de_target) + if not os.path.exists(icon_dir): + os.makedirs(icon_dir) + RNS.log("Installing icon to \""+str(icn_target)+"\"...", lde_level) + shutil.copy(icn_source, icn_target) + else: + RNS.log("Desktop integration is already set up", lde_level) + + except Exception as e: + RNS.log("An error occurred while setting up desktop integration: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) + + def clear_tmp_dir(self): if os.path.isdir(self.tmp_dir): for file in os.listdir(self.tmp_dir): @@ -302,6 +378,8 @@ class SidebandCore(): self.config["lxmf_periodic_sync"] = False self.config["lxmf_ignore_unknown"] = False self.config["lxmf_sync_interval"] = 43200 + self.config["lxmf_require_stamps"] = False + self.config["lxmf_inbound_stamp_cost"] = None self.config["last_lxmf_propagation_node"] = None self.config["nn_home_node"] = None self.config["print_command"] = "lp" @@ -463,6 +541,12 @@ class SidebandCore(): self.config["lxmf_sync_interval"] = 43200 if not "lxmf_try_propagation_on_fail" in self.config: self.config["lxmf_try_propagation_on_fail"] = True + if not "lxmf_require_stamps" in self.config: + self.config["lxmf_require_stamps"] = False + if not "lxmf_ignore_invalid_stamps" in self.config: + self.config["lxmf_ignore_invalid_stamps"] = True + if not "lxmf_inbound_stamp_cost" in self.config: + self.config["lxmf_inbound_stamp_cost"] = None if not "notifications_on" in self.config: self.config["notifications_on"] = True if not "print_command" in self.config: @@ -615,6 +699,8 @@ class SidebandCore(): self.config["telemetry_send_appearance"] = False if not "telemetry_display_trusted_only" in self.config: self.config["telemetry_display_trusted_only"] = False + if not "display_style_from_all" in self.config: + self.config["display_style_from_all"] = False if not "telemetry_receive_trusted_only" in self.config: self.config["telemetry_receive_trusted_only"] = False @@ -713,6 +799,7 @@ class SidebandCore(): if unpacked_config != None and len(unpacked_config) != 0: self.config = unpacked_config self.update_active_lxmf_propagation_node() + self.update_ignore_invalid_stamps() except Exception as e: RNS.log("Error while reloading configuration: "+str(e), RNS.LOG_ERROR) @@ -831,8 +918,14 @@ class SidebandCore(): def notify(self, title, content, group=None, context_id=None): if not self.is_daemon: + if RNS.vendor.platformutils.is_linux(): + from sbapp.ui.helpers import strip_emojis + title = strip_emojis(title) + content = strip_emojis(content) + + if self.config["notifications_on"]: - if RNS.vendor.platformutils.get_platform() == "android": + if RNS.vendor.platformutils.is_android(): if self.getpersistent("permissions.notifications"): notifications_permitted = True else: @@ -860,8 +953,8 @@ class SidebandCore(): except Exception as e: RNS.log("Exception while decoding LXMF destination announce data:"+str(e)) - def list_conversations(self): - result = self._db_conversations() + def list_conversations(self, conversations=True, objects=False): + result = 
self._db_conversations(conversations, objects) if result != None: return result else: @@ -900,10 +993,52 @@ class SidebandCore(): RNS.log("Error while checking trust for "+RNS.prettyhexrep(context_dest)+": "+str(e), RNS.LOG_ERROR) return False - def should_send_telemetry(self, context_dest): + def is_object(self, context_dest, conv_data = None): + try: + if conv_data == None: + existing_conv = self._db_conversation(context_dest) + else: + existing_conv = conv_data + + if existing_conv != None: + data_dict = existing_conv["data"] + if data_dict != None: + if "is_object" in data_dict: + return data_dict["is_object"] + + return False + + except Exception as e: + RNS.log("Error while checking trust for "+RNS.prettyhexrep(context_dest)+": "+str(e), RNS.LOG_ERROR) + return False + + def ptt_enabled(self, context_dest, conv_data = None): + try: + if conv_data == None: + existing_conv = self._db_conversation(context_dest) + else: + existing_conv = conv_data + + if existing_conv != None: + data_dict = existing_conv["data"] + if data_dict != None: + if "ptt_enabled" in data_dict: + return data_dict["ptt_enabled"] + + return False + + except Exception as e: + RNS.log("Error while checking PTT-enabled for "+RNS.prettyhexrep(context_dest)+": "+str(e), RNS.LOG_ERROR) + return False + + def should_send_telemetry(self, context_dest, conv_data=None): try: if self.config["telemetry_enabled"]: - existing_conv = self._db_conversation(context_dest) + if conv_data == None: + existing_conv = self._db_conversation(context_dest) + else: + existing_conv = conv_data + if existing_conv != None: cd = existing_conv["data"] if cd != None and "telemetry" in cd and cd["telemetry"] == True: @@ -938,9 +1073,13 @@ class SidebandCore(): RNS.log("Error while checking request permissions for "+RNS.prettyhexrep(context_dest)+": "+str(e), RNS.LOG_ERROR) return False - def requests_allowed_from(self, context_dest): + def requests_allowed_from(self, context_dest, conv_data=None): try: - existing_conv = self._db_conversation(context_dest) + if conv_data == None: + existing_conv = self._db_conversation(context_dest) + else: + existing_conv = conv_data + if existing_conv != None: cd = existing_conv["data"] if cd != None and "allow_requests" in cd and cd["allow_requests"] == True: @@ -994,15 +1133,15 @@ class SidebandCore(): app_data = RNS.Identity.recall_app_data(context_dest) if app_data != None: if existing_conv["trust"] == 1: - return app_data.decode("utf-8") + return LXMF.display_name_from_app_data(app_data) else: - return app_data.decode("utf-8")+" "+RNS.prettyhexrep(context_dest) + return LXMF.display_name_from_app_data(app_data)+" "+RNS.prettyhexrep(context_dest) else: return RNS.prettyhexrep(context_dest) else: app_data = RNS.Identity.recall_app_data(context_dest) if app_data != None: - return app_data.decode("utf-8")+" "+RNS.prettyhexrep(context_dest) + return LXMF.display_name_from_app_data(app_data)+" "+RNS.prettyhexrep(context_dest) else: return RNS.prettyhexrep(context_dest) @@ -1043,6 +1182,12 @@ class SidebandCore(): def untrusted_conversation(self, context_dest): self._db_conversation_set_trusted(context_dest, False) + def conversation_set_object(self, context_dest, is_object): + self._db_conversation_set_object(context_dest, is_object) + + def conversation_set_ptt_enabled(self, context_dest, ptt_enabled): + self._db_conversation_set_ptt_enabled(context_dest, ptt_enabled) + def send_telemetry_in_conversation(self, context_dest): self._db_conversation_set_telemetry(context_dest, True) @@ -1138,7 +1283,7 @@ class 
SidebandCore(): {Commands.TELEMETRY_REQUEST: request_timebase}, ]} - lxm = LXMF.LXMessage(dest, source, "", desired_method=desired_method, fields = lxm_fields) + lxm = LXMF.LXMessage(dest, source, "", desired_method=desired_method, fields = lxm_fields, include_ticket=True) lxm.request_timebase = request_timebase lxm.register_delivery_callback(self.telemetry_request_finished) lxm.register_failed_callback(self.telemetry_request_finished) @@ -1205,7 +1350,7 @@ class SidebandCore(): telemetry_timebase = max(telemetry_timebase, ts) if telemetry_timebase > (self.getpersistent(f"telemetry.{RNS.hexrep(to_addr, delimit=False)}.last_send_success_timebase") or 0): - lxm = LXMF.LXMessage(dest, source, "", desired_method=desired_method, fields = lxm_fields) + lxm = LXMF.LXMessage(dest, source, "", desired_method=desired_method, fields = lxm_fields, include_ticket=self.is_trusted(to_addr)) lxm.telemetry_timebase = telemetry_timebase lxm.register_delivery_callback(self.outbound_telemetry_finished) lxm.register_failed_callback(self.outbound_telemetry_finished) @@ -1395,11 +1540,27 @@ class SidebandCore(): RNS.log("Error while setting log level over RPC: "+str(e), RNS.LOG_DEBUG) return False + def service_rpc_set_ui_recording(self, recording): + if not RNS.vendor.platformutils.is_android(): + pass + else: + if self.is_service: + self.ui_recording = recording + return True + else: + try: + if self.rpc_connection == None: + self.rpc_connection = multiprocessing.connection.Client(self.rpc_addr, authkey=self.rpc_key) + self.rpc_connection.send({"set_ui_recording": recording}) + response = self.rpc_connection.recv() + return response + except Exception as e: + RNS.log("Error while setting UI recording status over RPC: "+str(e), RNS.LOG_DEBUG) + return False + def getstate(self, prop, allow_cache=False): with self.state_lock: if not self.service_stopped: - # TODO: remove - # us = time.time() if not RNS.vendor.platformutils.is_android(): if prop in self.state_db: @@ -1418,8 +1579,6 @@ class SidebandCore(): self.rpc_connection = multiprocessing.connection.Client(self.rpc_addr, authkey=self.rpc_key) self.rpc_connection.send({"getstate": prop}) response = self.rpc_connection.recv() - # TODO: Remove - # RNS.log("RPC getstate result for "+str(prop)+"="+str(response)+" in "+RNS.prettytime(time.time()-us), RNS.LOG_WARNING) return response except Exception as e: @@ -1503,6 +1662,9 @@ class SidebandCore(): elif "set_debug" in call: self.service_rpc_set_debug(call["set_debug"]) connection.send(True) + elif "set_ui_recording" in call: + self.service_rpc_set_ui_recording(call["set_ui_recording"]) + connection.send(True) elif "get_plugins_info" in call: connection.send(self._get_plugins_info()) else: @@ -1540,10 +1702,20 @@ class SidebandCore(): def __db_connect(self): if self.db == None: - self.db = sqlite3.connect(self.db_path, check_same_thread=False) + self.db = sqlite3.connect(self.db_path, check_same_thread=False, timeout=15.0) return self.db + def __db_reconnect(self): + if self.db != None: + try: + self.db.close() + except Exception as e: + RNS.log("Error while closing database for reconnect. 
The contained exception was:", RNS.LOG_ERROR) + RNS.trace_exception(e) + self.db = None + return self.__db_connect() + def __db_init(self): db = self.__db_connect() dbc = db.cursor() @@ -1602,67 +1774,6 @@ class SidebandCore(): # db.commit() self.setstate("database_ready", True) - # def _db_getstate(self, prop): - # try: - # db = self.__db_connect() - # dbc = db.cursor() - - # query = "select * from state where property=:uprop" - # dbc.execute(query, {"uprop": prop.encode("utf-8")}) - - # result = dbc.fetchall() - - # if len(result) < 1: - # return None - # else: - # try: - # entry = result[0] - # val = msgpack.unpackb(entry[1]) - - # return val - # except Exception as e: - # RNS.log("Could not unpack state value from database for property \""+str(prop)+"\". The contained exception was: "+str(e), RNS.LOG_ERROR) - # return None - - # except Exception as e: - # RNS.log("An error occurred during getstate database operation: "+str(e), RNS.LOG_ERROR) - # self.db = None - - # def _db_setstate(self, prop, val): - # try: - # uprop = prop.encode("utf-8") - # bval = msgpack.packb(val) - - # if self._db_getstate(prop) == None: - # try: - # db = self.__db_connect() - # dbc = db.cursor() - # query = "INSERT INTO state (property, value) values (?, ?)" - # data = (uprop, bval) - # dbc.execute(query, data) - # db.commit() - - # except Exception as e: - # RNS.log("Error while setting state property "+str(prop)+" in DB: "+str(e), RNS.LOG_ERROR) - # RNS.log("Retrying as update query...", RNS.LOG_ERROR) - # db = self.__db_connect() - # dbc = db.cursor() - # query = "UPDATE state set value=:bval where property=:uprop;" - # dbc.execute(query, {"bval": bval, "uprop": uprop}) - # db.commit() - - # else: - # db = self.__db_connect() - # dbc = db.cursor() - # query = "UPDATE state set value=:bval where property=:uprop;" - # dbc.execute(query, {"bval": bval, "uprop": uprop}) - # db.commit() - - - # except Exception as e: - # RNS.log("An error occurred during setstate database operation: "+str(e), RNS.LOG_ERROR) - # self.db = None - def _db_initpersistent(self): db = self.__db_connect() dbc = db.cursor() @@ -1671,223 +1782,255 @@ class SidebandCore(): db.commit() def _db_getpersistent(self, prop): - try: + with self.db_lock: + try: + db = self.__db_connect() + dbc = db.cursor() + + query = "select * from persistent where property=:uprop" + dbc.execute(query, {"uprop": prop.encode("utf-8")}) + result = dbc.fetchall() + + if len(result) < 1: + return None + else: + try: + entry = result[0] + val = msgpack.unpackb(entry[1]) + if val == None: + query = "delete from persistent where (property=:uprop);" + dbc.execute(query, {"uprop": prop.encode("utf-8")}) + db.commit() + + return val + except Exception as e: + RNS.log("Could not unpack persistent value from database for property \""+str(prop)+"\". 
The contained exception was: "+str(e), RNS.LOG_ERROR) + return None + + except Exception as e: + RNS.log("An error occurred during persistent getstate database operation: "+str(e), RNS.LOG_ERROR) + self.db = None + + def _db_setpersistent(self, prop, val): + existing_prop = self._db_getpersistent(prop) + + with self.db_lock: + try: + db = self.__db_connect() + dbc = db.cursor() + uprop = prop.encode("utf-8") + bval = msgpack.packb(val) + + if existing_prop == None: + try: + query = "INSERT INTO persistent (property, value) values (?, ?)" + data = (uprop, bval) + dbc.execute(query, data) + db.commit() + + except Exception as e: + RNS.log("Error while setting persistent state property "+str(prop)+" in DB: "+str(e), RNS.LOG_ERROR) + RNS.log("Retrying as update query...") + query = "UPDATE state set value=:bval where property=:uprop;" + dbc.execute(query, {"bval": bval, "uprop": uprop}) + db.commit() + + else: + query = "UPDATE persistent set value=:bval where property=:uprop;" + dbc.execute(query, {"bval": bval, "uprop": uprop}) + db.commit() + + except Exception as e: + RNS.log("An error occurred during persistent setstate database operation: "+str(e), RNS.LOG_ERROR) + self.db = None + + def _db_conversation_update_txtime(self, context_dest, is_retry = False): + with self.db_lock: + try: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set last_tx = ? where dest_context = ?" + data = (time.time(), context_dest) + + dbc.execute(query, data) + result = dbc.fetchall() + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation TX time: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_update_txtime(context_dest, is_retry=True) + + def _db_conversation_set_unread(self, context_dest, unread, tx = False, is_retry = False): + with self.db_lock: + try: + db = self.__db_connect() + dbc = db.cursor() + + if unread: + if tx: + query = "UPDATE conv set unread = ?, last_tx = ? where dest_context = ?" + data = (unread, time.time(), context_dest) + else: + query = "UPDATE conv set unread = ?, last_rx = ? where dest_context = ?" + data = (unread, time.time(), context_dest) + else: + query = "UPDATE conv set unread = ? where dest_context = ?" 
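
The database helpers touched in this region all follow one shape: take the shared `db_lock`, run the statement, and fall back to a `__db_reconnect()` if the commit fails, with the connection opened using `check_same_thread=False` and a `timeout`. Below is a minimal standalone sketch of that pattern; the class name is made up and the `conv` table is reduced to the two columns used here, so this is an illustration of the access pattern rather than Sideband's actual storage layer.

```python
import sqlite3
import threading

class LockedStore:
    """Minimal sketch of lock-guarded SQLite access with reconnect-on-failure."""

    def __init__(self, path):
        self.path = path
        self.db = None
        self.db_lock = threading.Lock()

    def _connect(self):
        if self.db is None:
            # The timeout makes SQLite wait briefly for a competing writer
            # instead of raising "database is locked" immediately.
            self.db = sqlite3.connect(self.path, check_same_thread=False, timeout=15.0)
        return self.db

    def _reconnect(self):
        # Drop the existing handle so the next operation gets a clean connection.
        if self.db is not None:
            try:
                self.db.close()
            except Exception:
                pass
            self.db = None
        return self._connect()

    def set_unread(self, dest_context, unread):
        with self.db_lock:
            db = self._connect()
            try:
                db.execute("UPDATE conv SET unread = ? WHERE dest_context = ?",
                           (unread, dest_context))
                db.commit()
            except Exception:
                self._reconnect()
```

Serializing writers behind one lock per process, plus the connection timeout, is what lets several threads share the single `sqlite3` handle here without tripping over each other.
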
+ data = (unread, context_dest) + + dbc.execute(query, data) + result = dbc.fetchall() + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation unread flag: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_set_unread(context_dest, unread, tx, is_retry=True) + + def _db_telemetry(self, context_dest = None, after = None, before = None, limit = None): + with self.db_lock: db = self.__db_connect() dbc = db.cursor() - - query = "select * from persistent where property=:uprop" - dbc.execute(query, {"uprop": prop.encode("utf-8")}) + + limit_part = "" + if limit: + limit_part = " LIMIT "+str(int(limit)) + order_part = " order by ts DESC"+limit_part + if context_dest == None: + if after != None and before == None: + query = "select * from telemetry where ts>:after_ts"+order_part + dbc.execute(query, {"after_ts": after}) + elif after == None and before != None: + query = "select * from telemetry where ts<:before_ts"+order_part + dbc.execute(query, {"before_ts": before}) + elif after != None and before != None: + query = "select * from telemetry where ts<:before_ts and ts>:after_ts"+order_part + dbc.execute(query, {"before_ts": before, "after_ts": after}) + else: + query = query = "select * from telemetry" + dbc.execute(query, {}) + + else: + if after != None and before == None: + query = "select * from telemetry where dest_context=:context_dest and ts>:after_ts"+order_part + dbc.execute(query, {"context_dest": context_dest, "after_ts": after}) + elif after == None and before != None: + query = "select * from telemetry where dest_context=:context_dest and ts<:before_ts"+order_part + dbc.execute(query, {"context_dest": context_dest, "before_ts": before}) + elif after != None and before != None: + query = "select * from telemetry where dest_context=:context_dest and ts<:before_ts and ts>:after_ts"+order_part + dbc.execute(query, {"context_dest": context_dest, "before_ts": before, "after_ts": after}) + else: + query = query = "select * from telemetry where dest_context=:context_dest"+order_part + dbc.execute(query, {"context_dest": context_dest}) + result = dbc.fetchall() if len(result) < 1: return None else: - try: - entry = result[0] - val = msgpack.unpackb(entry[1]) - if val == None: - query = "delete from persistent where (property=:uprop);" - dbc.execute(query, {"uprop": prop.encode("utf-8")}) - db.commit() + results = {} + for entry in result: + telemetry_source = entry[1] + telemetry_timestamp = entry[2] + telemetry_data = entry[3] + + if not telemetry_source in results: + results[telemetry_source] = [] - return val - except Exception as e: - RNS.log("Could not unpack persistent value from database for property \""+str(prop)+"\". 
The contained exception was: "+str(e), RNS.LOG_ERROR) + results[telemetry_source].append([telemetry_timestamp, telemetry_data]) + + return results + + def _db_save_telemetry(self, context_dest, telemetry, physical_link = None, source_dest = None, via = None, is_retry = False): + with self.db_lock: + try: + remote_telemeter = Telemeter.from_packed(telemetry) + read_telemetry = remote_telemeter.read_all() + telemetry_timestamp = read_telemetry["time"]["utc"] + + db = self.__db_connect() + dbc = db.cursor() + + query = "select * from telemetry where dest_context=:ctx and ts=:tts" + dbc.execute(query, {"ctx": context_dest, "tts": telemetry_timestamp}) + result = dbc.fetchall() + + if len(result) != 0: + RNS.log("Telemetry entry with source "+RNS.prettyhexrep(context_dest)+" and timestamp "+str(telemetry_timestamp)+" already exists, skipping save", RNS.LOG_DEBUG) return None - - except Exception as e: - RNS.log("An error occurred during persistent getstate database operation: "+str(e), RNS.LOG_ERROR) - self.db = None - def _db_setpersistent(self, prop, val): - try: - db = self.__db_connect() - dbc = db.cursor() - uprop = prop.encode("utf-8") - bval = msgpack.packb(val) + if physical_link != None and len(physical_link) != 0: + remote_telemeter.synthesize("physical_link") + if "rssi" in physical_link: remote_telemeter.sensors["physical_link"].rssi = physical_link["rssi"] + if "snr" in physical_link: remote_telemeter.sensors["physical_link"].snr = physical_link["snr"] + if "q" in physical_link: remote_telemeter.sensors["physical_link"].q = physical_link["q"] + remote_telemeter.sensors["physical_link"].update_data() + telemetry = remote_telemeter.packed() - if self._db_getpersistent(prop) == None: - try: - query = "INSERT INTO persistent (property, value) values (?, ?)" - data = (uprop, bval) - dbc.execute(query, data) - db.commit() - - except Exception as e: - RNS.log("Error while setting persistent state property "+str(prop)+" in DB: "+str(e), RNS.LOG_ERROR) - RNS.log("Retrying as update query...") - query = "UPDATE state set value=:bval where property=:uprop;" - dbc.execute(query, {"bval": bval, "uprop": uprop}) - db.commit() - - else: - query = "UPDATE persistent set value=:bval where property=:uprop;" - dbc.execute(query, {"bval": bval, "uprop": uprop}) - db.commit() - - except Exception as e: - RNS.log("An error occurred during persistent setstate database operation: "+str(e), RNS.LOG_ERROR) - self.db = None - - def _db_conversation_update_txtime(self, context_dest): - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE conv set last_tx = ? where dest_context = ?" - data = (time.time(), context_dest) - - dbc.execute(query, data) - result = dbc.fetchall() - db.commit() - - def _db_conversation_set_unread(self, context_dest, unread, tx = False): - db = self.__db_connect() - dbc = db.cursor() - - if unread: - if tx: - query = "UPDATE conv set unread = ?, last_tx = ? where dest_context = ?" - data = (unread, time.time(), context_dest) - else: - query = "UPDATE conv set unread = ?, last_rx = ? where dest_context = ?" - data = (unread, time.time(), context_dest) - else: - query = "UPDATE conv set unread = ? where dest_context = ?" 
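
The reworked `_db_telemetry` selects a newest-first window of telemetry rows, optionally bounded by `after`/`before` timestamps and capped by `limit`, with one branch per combination of bounds. As a purely illustrative sketch (not how the method itself is written), the same query can be composed from optional clauses:

```python
# Compose the windowed telemetry query from optional bounds, newest first.
# Table and column names follow the telemetry table used above.
def telemetry_query(context_dest=None, after=None, before=None, limit=None):
    clauses = []
    params = {}
    if context_dest is not None:
        clauses.append("dest_context = :ctx")
        params["ctx"] = context_dest
    if after is not None:
        clauses.append("ts > :after_ts")
        params["after_ts"] = after
    if before is not None:
        clauses.append("ts < :before_ts")
        params["before_ts"] = before

    query = "select * from telemetry"
    if clauses:
        query += " where " + " and ".join(clauses)
    query += " order by ts DESC"
    if limit:
        query += " LIMIT " + str(int(limit))
    return query, params

# Example: the 50 most recent entries newer than a given timestamp.
# query, params = telemetry_query(after=1700000000, limit=50)
```
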
- data = (unread, context_dest) - - dbc.execute(query, data) - result = dbc.fetchall() - db.commit() - - def _db_telemetry(self, context_dest = None, after = None, before = None, limit = None): - db = self.__db_connect() - dbc = db.cursor() - - limit_part = "" - if limit: - limit_part = " LIMIT "+str(int(limit)) - order_part = " order by ts DESC"+limit_part - if context_dest == None: - if after != None and before == None: - query = "select * from telemetry where ts>:after_ts"+order_part - dbc.execute(query, {"after_ts": after}) - elif after == None and before != None: - query = "select * from telemetry where ts<:before_ts"+order_part - dbc.execute(query, {"before_ts": before}) - elif after != None and before != None: - query = "select * from telemetry where ts<:before_ts and ts>:after_ts"+order_part - dbc.execute(query, {"before_ts": before, "after_ts": after}) - else: - query = query = "select * from telemetry" - dbc.execute(query, {}) - - else: - if after != None and before == None: - query = "select * from telemetry where dest_context=:context_dest and ts>:after_ts"+order_part - dbc.execute(query, {"context_dest": context_dest, "after_ts": after}) - elif after == None and before != None: - query = "select * from telemetry where dest_context=:context_dest and ts<:before_ts"+order_part - dbc.execute(query, {"context_dest": context_dest, "before_ts": before}) - elif after != None and before != None: - query = "select * from telemetry where dest_context=:context_dest and ts<:before_ts and ts>:after_ts"+order_part - dbc.execute(query, {"context_dest": context_dest, "before_ts": before, "after_ts": after}) - else: - query = query = "select * from telemetry where dest_context=:context_dest"+order_part - dbc.execute(query, {"context_dest": context_dest}) - - result = dbc.fetchall() - - if len(result) < 1: - return None - else: - results = {} - for entry in result: - telemetry_source = entry[1] - telemetry_timestamp = entry[2] - telemetry_data = entry[3] - - if not telemetry_source in results: - results[telemetry_source] = [] - - results[telemetry_source].append([telemetry_timestamp, telemetry_data]) - - return results - - def _db_save_telemetry(self, context_dest, telemetry, physical_link = None, source_dest = None, via = None): - try: - remote_telemeter = Telemeter.from_packed(telemetry) - read_telemetry = remote_telemeter.read_all() - telemetry_timestamp = read_telemetry["time"]["utc"] - - db = self.__db_connect() - dbc = db.cursor() - - query = "select * from telemetry where dest_context=:ctx and ts=:tts" - dbc.execute(query, {"ctx": context_dest, "tts": telemetry_timestamp}) - result = dbc.fetchall() - - if len(result) != 0: - RNS.log("Telemetry entry with source "+RNS.prettyhexrep(context_dest)+" and timestamp "+str(telemetry_timestamp)+" already exists, skipping save", RNS.LOG_DEBUG) - return None - - if physical_link != None and len(physical_link) != 0: - remote_telemeter.synthesize("physical_link") - if "rssi" in physical_link: remote_telemeter.sensors["physical_link"].rssi = physical_link["rssi"] - if "snr" in physical_link: remote_telemeter.sensors["physical_link"].snr = physical_link["snr"] - if "q" in physical_link: remote_telemeter.sensors["physical_link"].q = physical_link["q"] - remote_telemeter.sensors["physical_link"].update_data() - telemetry = remote_telemeter.packed() - - if source_dest != None: - remote_telemeter.synthesize("received") - remote_telemeter.sensors["received"].by = self.lxmf_destination.hash - remote_telemeter.sensors["received"].via = source_dest - - rl = 
remote_telemeter.read("location") - if rl and "latitude" in rl and "longitude" in rl and "altitude" in rl: - if self.latest_telemetry != None and "location" in self.latest_telemetry: - ol = self.latest_telemetry["location"] - if ol != None: - if "latitude" in ol and "longitude" in ol and "altitude" in ol: - olat = ol["latitude"]; olon = ol["longitude"]; oalt = ol["altitude"] - rlat = rl["latitude"]; rlon = rl["longitude"]; ralt = rl["altitude"] - if olat != None and olon != None and oalt != None: - if rlat != None and rlon != None and ralt != None: - remote_telemeter.sensors["received"].set_distance( - (olat, olon, oalt), (rlat, rlon, ralt) - ) - - remote_telemeter.sensors["received"].update_data() - telemetry = remote_telemeter.packed() - - if via != None: - if not "received" in remote_telemeter.sensors: + if source_dest != None: remote_telemeter.synthesize("received") + remote_telemeter.sensors["received"].by = self.lxmf_destination.hash + remote_telemeter.sensors["received"].via = source_dest - if "by" in remote_telemeter.sensors["received"].data: - remote_telemeter.sensors["received"].by = remote_telemeter.sensors["received"].data["by"] - if "distance" in remote_telemeter.sensors["received"].data: - remote_telemeter.sensors["received"].geodesic_distance = remote_telemeter.sensors["received"].data["distance"]["geodesic"] - remote_telemeter.sensors["received"].euclidian_distance = remote_telemeter.sensors["received"].data["distance"]["euclidian"] + rl = remote_telemeter.read("location") + if rl and "latitude" in rl and "longitude" in rl and "altitude" in rl: + if self.latest_telemetry != None and "location" in self.latest_telemetry: + ol = self.latest_telemetry["location"] + if ol != None: + if "latitude" in ol and "longitude" in ol and "altitude" in ol: + olat = ol["latitude"]; olon = ol["longitude"]; oalt = ol["altitude"] + rlat = rl["latitude"]; rlon = rl["longitude"]; ralt = rl["altitude"] + if olat != None and olon != None and oalt != None: + if rlat != None and rlon != None and ralt != None: + remote_telemeter.sensors["received"].set_distance( + (olat, olon, oalt), (rlat, rlon, ralt) + ) - remote_telemeter.sensors["received"].via = via - remote_telemeter.sensors["received"].update_data() - telemetry = remote_telemeter.packed() - - query = "INSERT INTO telemetry (dest_context, ts, data) values (?, ?, ?)" - data = (context_dest, telemetry_timestamp, telemetry) - dbc.execute(query, data) - db.commit() - self.setstate("app.flags.last_telemetry", time.time()) + remote_telemeter.sensors["received"].update_data() + telemetry = remote_telemeter.packed() - return telemetry + if via != None: + if not "received" in remote_telemeter.sensors: + remote_telemeter.synthesize("received") - except Exception as e: - import traceback - exception_info = "".join(traceback.TracebackException.from_exception(e).format()) - RNS.log(f"A {str(type(e))} occurred while saving telemetry to database: {str(e)}", RNS.LOG_ERROR) - RNS.log(exception_info, RNS.LOG_ERROR) - self.db = None + if "by" in remote_telemeter.sensors["received"].data: + remote_telemeter.sensors["received"].by = remote_telemeter.sensors["received"].data["by"] + if "distance" in remote_telemeter.sensors["received"].data: + remote_telemeter.sensors["received"].geodesic_distance = remote_telemeter.sensors["received"].data["distance"]["geodesic"] + remote_telemeter.sensors["received"].euclidian_distance = remote_telemeter.sensors["received"].data["distance"]["euclidian"] + + remote_telemeter.sensors["received"].via = via + 
remote_telemeter.sensors["received"].update_data() + telemetry = remote_telemeter.packed() + + query = "INSERT INTO telemetry (dest_context, ts, data) values (?, ?, ?)" + data = (context_dest, telemetry_timestamp, telemetry) + dbc.execute(query, data) + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while commiting telemetry to database: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_save_telemetry(context_dest, telemetry, physical_link, source_dest, via, is_retry = True) + return + + self.setstate("app.flags.last_telemetry", time.time()) + + return telemetry + + except Exception as e: + import traceback + exception_info = "".join(traceback.TracebackException.from_exception(e).format()) + RNS.log(f"A {str(type(e))} occurred while saving telemetry to database: {str(e)}", RNS.LOG_ERROR) + RNS.log(exception_info, RNS.LOG_ERROR) + self.db = None def _db_update_appearance(self, context_dest, timestamp, appearance, from_bulk_telemetry=False): conv = self._db_conversation(context_dest) @@ -1899,29 +2042,30 @@ class SidebandCore(): self.setpersistent("temp.peer_appearance."+RNS.hexrep(context_dest, delimit=False), ae) else: - data_dict = conv["data"] - if data_dict == None: - data_dict = {} + with self.db_lock: + data_dict = conv["data"] + if data_dict == None: + data_dict = {} - if not "appearance" in data_dict: - data_dict["appearance"] = None + if not "appearance" in data_dict: + data_dict["appearance"] = None - if from_bulk_telemetry and data_dict["appearance"] != SidebandCore.DEFAULT_APPEARANCE: - RNS.log("Aborting appearance update from bulk transfer, since conversation already has appearance set: "+str(appearance)+" / "+str(data_dict["appearance"]), RNS.LOG_DEBUG) - return + if from_bulk_telemetry and data_dict["appearance"] != SidebandCore.DEFAULT_APPEARANCE: + RNS.log("Aborting appearance update from bulk transfer, since conversation already has appearance set: "+str(appearance)+" / "+str(data_dict["appearance"]), RNS.LOG_DEBUG) + return - if data_dict["appearance"] != appearance: - data_dict["appearance"] = appearance - packed_dict = msgpack.packb(data_dict) - - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE conv set data = ? where dest_context = ?" - data = (packed_dict, context_dest) - dbc.execute(query, data) - result = dbc.fetchall() - db.commit() + if data_dict["appearance"] != appearance: + data_dict["appearance"] = appearance + packed_dict = msgpack.packb(data_dict) + + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set data = ? where dest_context = ?" + data = (packed_dict, context_dest) + dbc.execute(query, data) + result = dbc.fetchall() + db.commit() def _db_get_appearance(self, context_dest, conv = None, raw=False): if context_dest == self.lxmf_destination.hash: @@ -1969,7 +2113,7 @@ class SidebandCore(): return None - def _db_conversation_set_telemetry(self, context_dest, send_telemetry=False): + def _db_conversation_set_telemetry(self, context_dest, send_telemetry=False, is_retry = False): conv = self._db_conversation(context_dest) data_dict = conv["data"] if data_dict == None: @@ -1978,16 +2122,25 @@ class SidebandCore(): data_dict["telemetry"] = send_telemetry packed_dict = msgpack.packb(data_dict) - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE conv set data = ? where dest_context = ?" 
- data = (packed_dict, context_dest) - dbc.execute(query, data) - result = dbc.fetchall() - db.commit() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set data = ? where dest_context = ?" + data = (packed_dict, context_dest) + dbc.execute(query, data) + result = dbc.fetchall() - def _db_conversation_set_requests(self, context_dest, allow_requests=False): + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation telemetry options: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_set_telemetry(context_dest, send_telemetry, is_retry=True) + + def _db_conversation_set_requests(self, context_dest, allow_requests=False, is_retry=False): conv = self._db_conversation(context_dest) data_dict = conv["data"] if data_dict == None: @@ -1996,171 +2149,270 @@ class SidebandCore(): data_dict["allow_requests"] = allow_requests packed_dict = msgpack.packb(data_dict) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set data = ? where dest_context = ?" + data = (packed_dict, context_dest) + dbc.execute(query, data) + result = dbc.fetchall() + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation request options: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + if not is_retry: + RNS.log("Retrying operation...", RNS.LOG_ERROR) + self._db_conversation_set_requests(context_dest, allow_requests, is_retry=True) + + def _db_conversation_set_object(self, context_dest, is_object=False): + conv = self._db_conversation(context_dest) + data_dict = conv["data"] + if data_dict == None: + data_dict = {} + + data_dict["is_object"] = is_object + packed_dict = msgpack.packb(data_dict) - query = "UPDATE conv set data = ? where dest_context = ?" - data = (packed_dict, context_dest) - dbc.execute(query, data) - result = dbc.fetchall() - db.commit() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set data = ? where dest_context = ?" + data = (packed_dict, context_dest) + dbc.execute(query, data) + result = dbc.fetchall() + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation object option: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_set_object(context_dest, is_object, is_retry=True) + + def _db_conversation_set_ptt_enabled(self, context_dest, ptt_enabled=False): + conv = self._db_conversation(context_dest) + data_dict = conv["data"] + if data_dict == None: + data_dict = {} + + data_dict["ptt_enabled"] = ptt_enabled + packed_dict = msgpack.packb(data_dict) + + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set data = ? where dest_context = ?" 
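
`_db_conversation_set_object` and `_db_conversation_set_ptt_enabled` both persist a boolean inside the conversation's msgpack-packed `data` blob instead of adding new columns, which is also how `is_object()` and `ptt_enabled()` read the flags back. A condensed sketch of that read-modify-write, assuming the `msgpack` package and leaving out the locking and reconnect handling shown in the earlier sketch:

```python
import msgpack

def set_conversation_flag(db, dest_context, flag, value):
    """Store a per-conversation flag (e.g. "is_object" or "ptt_enabled")
    inside the msgpack-packed data blob of the conv row."""
    row = db.execute("select data from conv where dest_context = ?",
                     (dest_context,)).fetchone()
    data_dict = msgpack.unpackb(row[0]) if row and row[0] is not None else None
    if data_dict is None:
        data_dict = {}

    data_dict[flag] = value
    db.execute("UPDATE conv set data = ? where dest_context = ?",
               (msgpack.packb(data_dict), dest_context))
    db.commit()
```

Keeping these flags in the packed blob avoids a schema migration at the cost of a full read-modify-write per update, which is cheap at conversation-row sizes.
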
+ data = (packed_dict, context_dest) + dbc.execute(query, data) + result = dbc.fetchall() + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation PTT option: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_set_ptt_enabled(context_dest, ptt_enabled, is_retry=True) def _db_conversation_set_trusted(self, context_dest, trusted): - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE conv set trust = ? where dest_context = ?" - data = (trusted, context_dest) - dbc.execute(query, data) - result = dbc.fetchall() - db.commit() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set trust = ? where dest_context = ?" + data = (trusted, context_dest) + dbc.execute(query, data) + result = dbc.fetchall() + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation trusted option: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_set_trusted(context_dest, trusted, is_retry=True) def _db_conversation_set_name(self, context_dest, name): - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE conv set name=:name_data where dest_context=:ctx;" - dbc.execute(query, {"ctx": context_dest, "name_data": name.encode("utf-8")}) - result = dbc.fetchall() - db.commit() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE conv set name=:name_data where dest_context=:ctx;" + dbc.execute(query, {"ctx": context_dest, "name_data": name.encode("utf-8")}) + result = dbc.fetchall() + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while updating conversation name option: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_conversation_set_name(context_dest, name, is_retry=True) - def _db_conversations(self): - db = self.__db_connect() - dbc = db.cursor() - - dbc.execute("select * from conv") - result = dbc.fetchall() + def _db_conversations(self, conversations=True, objects=False): + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + dbc.execute("select * from conv") + result = dbc.fetchall() - if len(result) < 1: - return None - else: - convs = [] - for entry in result: - last_rx = entry[1] - last_tx = entry[2] - last_activity = max(last_rx, last_tx) - data = None - try: - data = msgpack.unpackb(entry[7]) - except: - pass + if len(result) < 1: + return None + else: + convs = [] + for entry in result: + is_object = False + last_rx = entry[1] + last_tx = entry[2] + last_activity = max(last_rx, last_tx) + data = None + try: + data = msgpack.unpackb(entry[7]) + if "is_object" in data: + is_object = data["is_object"] + except: + pass - conv = { - "dest": entry[0], - "unread": entry[3], - "last_rx": last_rx, - "last_tx": last_tx, - "last_activity": last_activity, - "trust": entry[5], - "data": data, - } - convs.append(conv) + conv = { + "dest": entry[0], + "unread": entry[3], + "last_rx": last_rx, + "last_tx": last_tx, + "last_activity": last_activity, + "trust": entry[5], + "data": data, + } + should_add = False + if conversations and not is_object: + should_add = True + if objects and is_object: + should_add = True - return sorted(convs, key=lambda c: c["last_activity"], reverse=True) + if should_add: + 
convs.append(conv) + + return sorted(convs, key=lambda c: c["last_activity"], reverse=True) def _db_announces(self): - db = self.__db_connect() - dbc = db.cursor() - - dbc.execute("select * from announce order by received desc") - result = dbc.fetchall() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + dbc.execute("select * from announce order by received desc") + result = dbc.fetchall() - if len(result) < 1: - return None - else: - announces = [] - added_dests = [] - for entry in result: - try: - if not entry[2] in added_dests: - announce = { - "dest": entry[2], - "data": entry[3].decode("utf-8"), - "time": entry[1], - "type": entry[4] - } - added_dests.append(entry[2]) - announces.append(announce) - except Exception as e: - RNS.log("Exception while fetching announce from DB: "+str(e), RNS.LOG_ERROR) + if len(result) < 1: + return None + else: + announces = [] + added_dests = [] + for entry in result: + try: + if not entry[2] in added_dests: + announce = { + "dest": entry[2], + "data": entry[3].decode("utf-8"), + "time": entry[1], + "type": entry[4] + } + added_dests.append(entry[2]) + announces.append(announce) + except Exception as e: + RNS.log("Exception while fetching announce from DB: "+str(e), RNS.LOG_ERROR) - announces.reverse() - return announces + announces.reverse() + return announces def _db_conversation(self, context_dest): - db = self.__db_connect() - dbc = db.cursor() - - query = "select * from conv where dest_context=:ctx" - dbc.execute(query, {"ctx": context_dest}) - result = dbc.fetchall() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "select * from conv where dest_context=:ctx" + dbc.execute(query, {"ctx": context_dest}) + result = dbc.fetchall() - if len(result) < 1: - return None - else: - c = result[0] - conv = {} - conv["dest"] = c[0] - conv["last_tx"] = c[1] - conv["last_rx"] = c[2] - conv["unread"] = c[3] - conv["type"] = c[4] - conv["trust"] = c[5] - conv["name"] = c[6].decode("utf-8") - conv["data"] = msgpack.unpackb(c[7]) - conv["last_activity"] = max(c[1], c[2]) - return conv + if len(result) < 1: + return None + else: + c = result[0] + conv = {} + conv["dest"] = c[0] + conv["last_tx"] = c[1] + conv["last_rx"] = c[2] + conv["unread"] = c[3] + conv["type"] = c[4] + conv["trust"] = c[5] + conv["name"] = c[6].decode("utf-8") + conv["data"] = msgpack.unpackb(c[7]) + conv["last_activity"] = max(c[1], c[2]) + return conv def _db_clear_conversation(self, context_dest): RNS.log("Clearing conversation with "+RNS.prettyhexrep(context_dest), RNS.LOG_DEBUG) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - query = "delete from lxm where (dest=:ctx_dst or source=:ctx_dst);" - dbc.execute(query, {"ctx_dst": context_dest}) - db.commit() + query = "delete from lxm where (dest=:ctx_dst or source=:ctx_dst);" + dbc.execute(query, {"ctx_dst": context_dest}) + db.commit() def _db_clear_telemetry(self, context_dest): RNS.log("Clearing telemetry for "+RNS.prettyhexrep(context_dest), RNS.LOG_DEBUG) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - query = "delete from telemetry where dest_context=:ctx_dst;" - dbc.execute(query, {"ctx_dst": context_dest}) - db.commit() + query = "delete from telemetry where dest_context=:ctx_dst;" + dbc.execute(query, {"ctx_dst": context_dest}) + db.commit() self.setstate("app.flags.last_telemetry", time.time()) def _db_delete_conversation(self, context_dest): 
RNS.log("Deleting conversation with "+RNS.prettyhexrep(context_dest), RNS.LOG_DEBUG) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - query = "delete from conv where (dest_context=:ctx_dst);" - dbc.execute(query, {"ctx_dst": context_dest}) - db.commit() + query = "delete from conv where (dest_context=:ctx_dst);" + dbc.execute(query, {"ctx_dst": context_dest}) + db.commit() def _db_delete_announce(self, context_dest): RNS.log("Deleting announce with "+RNS.prettyhexrep(context_dest), RNS.LOG_DEBUG) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - query = "delete from announce where (source=:ctx_dst);" - dbc.execute(query, {"ctx_dst": context_dest}) - db.commit() + query = "delete from announce where (source=:ctx_dst);" + dbc.execute(query, {"ctx_dst": context_dest}) + db.commit() def _db_create_conversation(self, context_dest, name = None, trust = False): RNS.log("Creating conversation for "+RNS.prettyhexrep(context_dest), RNS.LOG_DEBUG) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - def_name = "".encode("utf-8") - query = "INSERT INTO conv (dest_context, last_tx, last_rx, unread, type, trust, name, data) values (?, ?, ?, ?, ?, ?, ?, ?)" - data = (context_dest, 0, time.time(), 0, SidebandCore.CONV_P2P, 0, def_name, msgpack.packb(None)) + def_name = "".encode("utf-8") + query = "INSERT INTO conv (dest_context, last_tx, last_rx, unread, type, trust, name, data) values (?, ?, ?, ?, ?, ?, ?, ?)" + data = (context_dest, 0, time.time(), 0, SidebandCore.CONV_P2P, 0, def_name, msgpack.packb(None)) - dbc.execute(query, data) - db.commit() + dbc.execute(query, data) + db.commit() if trust: self._db_conversation_set_trusted(context_dest, True) @@ -2172,123 +2424,105 @@ class SidebandCore(): def _db_delete_message(self, msg_hash): RNS.log("Deleting message "+RNS.prettyhexrep(msg_hash)) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - query = "delete from lxm where (lxm_hash=:mhash);" - dbc.execute(query, {"mhash": msg_hash}) - db.commit() + query = "delete from lxm where (lxm_hash=:mhash);" + dbc.execute(query, {"mhash": msg_hash}) + db.commit() def _db_clean_messages(self): RNS.log("Purging stale messages... "+str(self.db_path)) - db = self.__db_connect() - dbc = db.cursor() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() - query = "delete from lxm where (state=:outbound_state or state=:sending_state);" - dbc.execute(query, {"outbound_state": LXMF.LXMessage.OUTBOUND, "sending_state": LXMF.LXMessage.SENDING}) - db.commit() + query = "delete from lxm where (state=:outbound_state or state=:sending_state);" + dbc.execute(query, {"outbound_state": LXMF.LXMessage.OUTBOUND, "sending_state": LXMF.LXMessage.SENDING}) + db.commit() - def _db_message_set_state(self, lxm_hash, state): - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE lxm set state = ? where lxm_hash = ?" 
- data = (state, lxm_hash) - dbc.execute(query, data) - db.commit() - result = dbc.fetchall() + def _db_message_set_state(self, lxm_hash, state, is_retry=False, ratchet_id=None, originator_stamp=None): + msg_extras = None + if ratchet_id != None: + try: + msg = self._db_message(lxm_hash) + if msg != None: + msg_extras = msg["extras"] + + if ratchet_id: + msg_extras["ratchet_id"] = ratchet_id + + if originator_stamp: + msg_extras["stamp_checked"] = False + msg_extras["stamp_raw"] = originator_stamp[0] + msg_extras["stamp_valid"] = originator_stamp[1] + msg_extras["stamp_value"] = originator_stamp[2] + + except Exception as e: + RNS.log("An error occurred while getting message extras: "+str(e)) + + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + if msg_extras != None: + extras = msgpack.packb(msg_extras) + query = "UPDATE lxm set state = ?, extra = ? where lxm_hash = ?" + data = (state, extras, lxm_hash) + + else: + query = "UPDATE lxm set state = ? where lxm_hash = ?" + data = (state, lxm_hash) + + dbc.execute(query, data) + + try: + db.commit() + result = dbc.fetchall() + except Exception as e: + RNS.log("An error occurred while updating message state: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_message_set_state(lxm_hash, state, is_retry=True) def _db_message_set_method(self, lxm_hash, method): - db = self.__db_connect() - dbc = db.cursor() - - query = "UPDATE lxm set method = ? where lxm_hash = ?" - data = (method, lxm_hash) - dbc.execute(query, data) - db.commit() - result = dbc.fetchall() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "UPDATE lxm set method = ? where lxm_hash = ?" + data = (method, lxm_hash) + dbc.execute(query, data) + + try: + db.commit() + result = dbc.fetchall() + except Exception as e: + RNS.log("An error occurred while updating message method: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_message_set_method(lxm_hash, method, is_retry=True) def message(self, msg_hash): return self._db_message(msg_hash) def _db_message(self, msg_hash): - db = self.__db_connect() - dbc = db.cursor() - - query = "select * from lxm where lxm_hash=:mhash" - dbc.execute(query, {"mhash": msg_hash}) - result = dbc.fetchall() + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "select * from lxm where lxm_hash=:mhash" + dbc.execute(query, {"mhash": msg_hash}) + result = dbc.fetchall() - if len(result) < 1: - return None - else: - entry = result[0] - - lxm_method = entry[7] - if lxm_method == LXMF.LXMessage.PAPER: - lxm_data = msgpack.unpackb(entry[10]) - packed_lxm = lxm_data[0] - paper_packed_lxm = lxm_data[1] + if len(result) < 1: + return None else: - packed_lxm = entry[10] + entry = result[0] - lxm = LXMF.LXMessage.unpack_from_bytes(packed_lxm, original_method = lxm_method) - - if lxm.desired_method == LXMF.LXMessage.PAPER: - lxm.paper_packed = paper_packed_lxm - - message = { - "hash": lxm.hash, - "dest": lxm.destination_hash, - "source": lxm.source_hash, - "title": lxm.title, - "content": lxm.content, - "received": entry[5], - "sent": lxm.timestamp, - "state": entry[6], - "method": entry[7], - "lxm": lxm - } - return message - - def _db_message_count(self, context_dest): - db = self.__db_connect() - dbc = db.cursor() - - query = "select count(*) from lxm where dest=:context_dest or source=:context_dest" - dbc.execute(query, 
{"context_dest": context_dest}) - - result = dbc.fetchall() - - if len(result) < 1: - return None - else: - return result[0][0] - - def _db_messages(self, context_dest, after = None, before = None, limit = None): - db = self.__db_connect() - dbc = db.cursor() - - if after != None and before == None: - query = "select * from lxm where (dest=:context_dest or source=:context_dest) and rx_ts>:after_ts" - dbc.execute(query, {"context_dest": context_dest, "after_ts": after}) - elif after == None and before != None: - query = "select * from lxm where (dest=:context_dest or source=:context_dest) and rx_ts<:before_ts" - dbc.execute(query, {"context_dest": context_dest, "before_ts": before}) - elif after != None and before != None: - query = "select * from lxm where (dest=:context_dest or source=:context_dest) and rx_ts<:before_ts and rx_ts>:after_ts" - dbc.execute(query, {"context_dest": context_dest, "before_ts": before, "after_ts": after}) - else: - query = "select * from lxm where dest=:context_dest or source=:context_dest" - dbc.execute(query, {"context_dest": context_dest}) - - result = dbc.fetchall() - - if len(result) < 1: - return None - else: - messages = [] - for entry in result: lxm_method = entry[7] if lxm_method == LXMF.LXMessage.PAPER: lxm_data = msgpack.unpackb(entry[10]) @@ -2297,17 +2531,17 @@ class SidebandCore(): else: packed_lxm = entry[10] - lxm = LXMF.LXMessage.unpack_from_bytes(packed_lxm, original_method = lxm_method) - - if lxm.desired_method == LXMF.LXMessage.PAPER: - lxm.paper_packed = paper_packed_lxm - extras = None try: extras = msgpack.unpackb(entry[11]) except: pass - + + lxm = LXMF.LXMessage.unpack_from_bytes(packed_lxm, original_method = lxm_method) + + if lxm.desired_method == LXMF.LXMessage.PAPER: + lxm.paper_packed = paper_packed_lxm + message = { "hash": lxm.hash, "dest": lxm.destination_hash, @@ -2321,13 +2555,87 @@ class SidebandCore(): "lxm": lxm, "extras": extras, } + return message - messages.append(message) - if len(messages) > limit: - messages = messages[-limit:] - return messages + def _db_message_count(self, context_dest): + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + query = "select count(*) from lxm where dest=:context_dest or source=:context_dest" + dbc.execute(query, {"context_dest": context_dest}) - def _db_save_lxm(self, lxm, context_dest, originator = False, own_command = False): + result = dbc.fetchall() + + if len(result) < 1: + return None + else: + return result[0][0] + + def _db_messages(self, context_dest, after = None, before = None, limit = None): + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + if after != None and before == None: + query = "select * from lxm where (dest=:context_dest or source=:context_dest) and rx_ts>:after_ts" + dbc.execute(query, {"context_dest": context_dest, "after_ts": after}) + elif after == None and before != None: + query = "select * from lxm where (dest=:context_dest or source=:context_dest) and rx_ts<:before_ts" + dbc.execute(query, {"context_dest": context_dest, "before_ts": before}) + elif after != None and before != None: + query = "select * from lxm where (dest=:context_dest or source=:context_dest) and rx_ts<:before_ts and rx_ts>:after_ts" + dbc.execute(query, {"context_dest": context_dest, "before_ts": before, "after_ts": after}) + else: + query = "select * from lxm where dest=:context_dest or source=:context_dest" + dbc.execute(query, {"context_dest": context_dest}) + + result = dbc.fetchall() + + if len(result) < 1: + return None + else: + messages = 
[] + for entry in result: + lxm_method = entry[7] + if lxm_method == LXMF.LXMessage.PAPER: + lxm_data = msgpack.unpackb(entry[10]) + packed_lxm = lxm_data[0] + paper_packed_lxm = lxm_data[1] + else: + packed_lxm = entry[10] + + lxm = LXMF.LXMessage.unpack_from_bytes(packed_lxm, original_method = lxm_method) + + if lxm.desired_method == LXMF.LXMessage.PAPER: + lxm.paper_packed = paper_packed_lxm + + extras = None + try: + extras = msgpack.unpackb(entry[11]) + except: + pass + + message = { + "hash": lxm.hash, + "dest": lxm.destination_hash, + "source": lxm.source_hash, + "title": lxm.title, + "content": lxm.content, + "received": entry[5], + "sent": lxm.timestamp, + "state": entry[6], + "method": entry[7], + "lxm": lxm, + "extras": extras, + } + + messages.append(message) + if len(messages) > limit: + messages = messages[-limit:] + return messages + + def _db_save_lxm(self, lxm, context_dest, originator = False, own_command = False, is_retry = False): state = lxm.state packed_telemetry = None @@ -2367,83 +2675,114 @@ class SidebandCore(): RNS.log("Received telemetry stream field with no data: "+str(lxm.fields[LXMF.FIELD_TELEMETRY_STREAM]), RNS.LOG_DEBUG) if own_command or len(lxm.content) != 0 or len(lxm.title) != 0: + with self.db_lock: + db = self.__db_connect() + dbc = db.cursor() + + if not lxm.packed: + lxm.pack() + + if lxm.method == LXMF.LXMessage.PAPER: + packed_lxm = msgpack.packb([lxm.packed, lxm.paper_packed]) + else: + packed_lxm = lxm.packed + + extras = {} + if lxm.rssi or lxm.snr or lxm.q: + extras["rssi"] = lxm.rssi + extras["snr"] = lxm.snr + extras["q"] = lxm.q + + if lxm.stamp_checked: + extras["stamp_checked"] = True + extras["stamp_valid"] = lxm.stamp_valid + extras["stamp_value"] = lxm.stamp_value + extras["stamp_raw"] = lxm.stamp + + if lxm.ratchet_id: + extras["ratchet_id"] = lxm.ratchet_id + + if packed_telemetry != None: + extras["packed_telemetry"] = packed_telemetry + + extras = msgpack.packb(extras) + + query = "INSERT INTO lxm (lxm_hash, dest, source, title, tx_ts, rx_ts, state, method, t_encrypted, t_encryption, data, extra) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" + data = ( + lxm.hash, + lxm.destination_hash, + lxm.source_hash, + lxm.title, + lxm.timestamp, + time.time(), + state, + lxm.method, + lxm.transport_encrypted, + lxm.transport_encryption, + packed_lxm, + extras + ) + + dbc.execute(query, data) + + try: + db.commit() + except Exception as e: + RNS.log("An error occurred while saving message to database: "+str(e), RNS.LOG_ERROR) + self.__db_reconnect() + # if not is_retry: + # RNS.log("Retrying operation...", RNS.LOG_ERROR) + # self._db_save_lxm(lxm, context_dest, originator = originator, own_command = own_command, is_retry = True) + # return + + self.__event_conversation_changed(context_dest) + + def _db_save_announce(self, destination_hash, app_data, dest_type="lxmf.delivery"): + with self.db_lock: db = self.__db_connect() dbc = db.cursor() - if not lxm.packed: - lxm.pack() + query = "delete from announce where id is NULL or id not in (select id from announce order by received desc limit "+str(self.MAX_ANNOUNCES)+")" + dbc.execute(query) - if lxm.method == LXMF.LXMessage.PAPER: - packed_lxm = msgpack.packb([lxm.packed, lxm.paper_packed]) - else: - packed_lxm = lxm.packed + query = "delete from announce where (source=:source);" + dbc.execute(query, {"source": destination_hash}) - extras = {} - if lxm.rssi or lxm.snr or lxm.q: - extras["rssi"] = lxm.rssi - extras["snr"] = lxm.snr - extras["q"] = lxm.q + now = time.time() + hash_material = 
str(time).encode("utf-8")+destination_hash+app_data+dest_type.encode("utf-8") + announce_hash = RNS.Identity.full_hash(hash_material) - if packed_telemetry != None: - extras["packed_telemetry"] = packed_telemetry - - extras = msgpack.packb(extras) - - query = "INSERT INTO lxm (lxm_hash, dest, source, title, tx_ts, rx_ts, state, method, t_encrypted, t_encryption, data, extra) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" + query = "INSERT INTO announce (id, received, source, data, dest_type) values (?, ?, ?, ?, ?)" data = ( - lxm.hash, - lxm.destination_hash, - lxm.source_hash, - lxm.title, - lxm.timestamp, - time.time(), - state, - lxm.method, - lxm.transport_encrypted, - lxm.transport_encryption, - packed_lxm, - extras + announce_hash, + now, + destination_hash, + app_data, + dest_type, ) dbc.execute(query, data) db.commit() - self.__event_conversation_changed(context_dest) - - def _db_save_announce(self, destination_hash, app_data, dest_type="lxmf.delivery"): - db = self.__db_connect() - dbc = db.cursor() - - query = "delete from announce where id is NULL or id not in (select id from announce order by received desc limit "+str(self.MAX_ANNOUNCES)+")" - dbc.execute(query) - - query = "delete from announce where (source=:source);" - dbc.execute(query, {"source": destination_hash}) - - now = time.time() - hash_material = str(time).encode("utf-8")+destination_hash+app_data+dest_type.encode("utf-8") - announce_hash = RNS.Identity.full_hash(hash_material) - - query = "INSERT INTO announce (id, received, source, data, dest_type) values (?, ?, ?, ?, ?)" - data = ( - announce_hash, - now, - destination_hash, - app_data, - dest_type, - ) - - dbc.execute(query, data) - db.commit() - def lxmf_announce(self, attached_interface=None): if self.is_standalone or self.is_service: - self.lxmf_destination.announce(attached_interface=attached_interface) + if self.config["lxmf_require_stamps"]: + self.message_router.set_inbound_stamp_cost(self.lxmf_destination.hash, self.config["lxmf_inbound_stamp_cost"]) + self.message_router.announce(self.lxmf_destination.hash, attached_interface=attached_interface) + else: + self.message_router.set_inbound_stamp_cost(self.lxmf_destination.hash, None) + self.lxmf_destination.announce(attached_interface=attached_interface) self.last_lxmf_announce = time.time() - self.next_auto_announce = time.time() + 60*(random.random()*(SidebandCore.AUTO_ANNOUNCE_RANDOM_MAX-SidebandCore.AUTO_ANNOUNCE_RANDOM_MIN)) + self.next_auto_announce = time.time() + 60*(random.random()*(SidebandCore.AUTO_ANNOUNCE_RANDOM_MAX-SidebandCore.AUTO_ANNOUNCE_RANDOM_MIN)+SidebandCore.AUTO_ANNOUNCE_RANDOM_MIN) RNS.log("Next auto announce in "+RNS.prettytime(self.next_auto_announce-time.time()), RNS.LOG_DEBUG) self.setstate("wants.announce", False) + else: + if self.config["lxmf_require_stamps"]: + self.message_router.set_inbound_stamp_cost(self.lxmf_destination.hash, self.config["lxmf_inbound_stamp_cost"]) + else: + self.message_router.set_inbound_stamp_cost(self.lxmf_destination.hash, None) + self.setstate("wants.announce", True) def run_telemetry(self): @@ -2482,33 +2821,42 @@ class SidebandCore(): def update_telemetry(self): try: + try: + latest_telemetry = deepcopy(self.latest_telemetry) + except: + latest_telemetry = None + telemetry = self.get_telemetry() packed_telemetry = self.get_packed_telemetry() telemetry_changed = False if telemetry != None and packed_telemetry != None: - if self.latest_telemetry == None or len(telemetry) != len(self.latest_telemetry): + if latest_telemetry == None or len(telemetry) 
!= len(latest_telemetry): telemetry_changed = True - for sn in telemetry: - if telemetry_changed: - break + if latest_telemetry != None: - if sn != "time": - if sn in self.latest_telemetry: - if telemetry[sn] != self.latest_telemetry[sn]: - telemetry_changed = True - else: - telemetry_changed = True + if not telemetry_changed: + for sn in telemetry: + if telemetry_changed: + break - if self.latest_telemetry != None: - for sn in self.latest_telemetry: - if telemetry_changed: - break + if sn != "time": + if sn in latest_telemetry: + if telemetry[sn] != latest_telemetry[sn]: + telemetry_changed = True + else: + telemetry_changed = True - if sn != "time": - if not sn in telemetry: - telemetry_changed = True + if not telemetry_changed: + for sn in latest_telemetry: + + if telemetry_changed: + break + + if sn != "time": + if not sn in telemetry: + telemetry_changed = True if telemetry_changed: self.telemetry_changes += 1 @@ -2622,6 +2970,19 @@ class SidebandCore(): RNS.log("Error while querying for key: "+str(e), RNS.LOG_ERROR) return False + def _update_delivery_limits(self): + try: + if self.config["lxm_limit_1mb"]: + lxm_limit = 1000 + else: + lxm_limit = self.default_lxm_limit + if self.message_router.delivery_per_transfer_limit != lxm_limit: + self.message_router.delivery_per_transfer_limit = lxm_limit + RNS.log("Updated delivery limit to "+RNS.prettysize(self.message_router.delivery_per_transfer_limit*1000), RNS.LOG_DEBUG) + + except Exception as e: + RNS.log("Error while updating LXMF router delivery limit: "+str(e), RNS.LOG_ERROR) + def _service_jobs(self): if self.is_service: last_usb_discovery = time.time() @@ -2816,6 +3177,8 @@ class SidebandCore(): if self.owner_service != None: self.owner_service.update_location_provider() + self._update_delivery_limits() + if self.config["lxmf_periodic_sync"] == True: if self.getpersistent("lxmf.lastsync") == None: self.setpersistent("lxmf.lastsync", time.time()) @@ -3407,13 +3770,26 @@ class SidebandCore(): if self.config["lxm_limit_1mb"]: lxm_limit = 1000 else: - lxm_limit = 128*1000 + lxm_limit = self.default_lxm_limit self.message_router = LXMF.LXMRouter(identity = self.identity, storagepath = self.lxmf_storage, autopeer = True, delivery_limit = lxm_limit) self.message_router.register_delivery_callback(self.lxmf_delivery) - self.lxmf_destination = self.message_router.register_delivery_identity(self.identity, display_name=self.config["display_name"]) - self.lxmf_destination.set_default_app_data(self.get_display_name_bytes) + configured_stamp_cost = None + if self.config["lxmf_require_stamps"]: + configured_stamp_cost = self.config["lxmf_inbound_stamp_cost"] + + self.lxmf_destination = self.message_router.register_delivery_identity(self.identity, display_name=self.config["display_name"], stamp_cost=configured_stamp_cost) + if self.config["lxmf_ignore_invalid_stamps"]: + self.message_router.enforce_stamps() + else: + self.message_router.ignore_stamps() + + # TODO: Update to announce call in LXMF when full 0.5.0 support is added (get app data from LXMRouter instead) + # Currently overrides the LXMF routers auto-generated announce data so that Sideband will announce old-format + # LXMF announces if require_stamps is disabled. 
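
The stamp-related changes in this patch hang together through three settings, `lxmf_require_stamps`, `lxmf_inbound_stamp_cost` and `lxmf_ignore_invalid_stamps`, which are applied to the LXMF router through `set_inbound_stamp_cost()`, `enforce_stamps()` and `ignore_stamps()` in the announce and startup paths above. A hedged sketch of that mapping in isolation, using only the router calls visible in this diff; the `router`, `destination_hash` and `config` arguments are stand-ins for illustration:

```python
def apply_stamp_config(router, destination_hash, config):
    """Mirror the stamp configuration applied in lxmf_announce() and
    update_ignore_invalid_stamps() above (illustrative sketch)."""
    if config["lxmf_require_stamps"]:
        # Advertise an inbound stamp cost for our delivery destination.
        router.set_inbound_stamp_cost(destination_hash, config["lxmf_inbound_stamp_cost"])
    else:
        router.set_inbound_stamp_cost(destination_hash, None)

    # As in update_ignore_invalid_stamps(): ignoring invalid stamps maps to
    # enforce_stamps(), otherwise the router is told to ignore stamp checks.
    if config["lxmf_ignore_invalid_stamps"]:
        router.enforce_stamps()
    else:
        router.ignore_stamps()
```
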
+ # if not self.config["lxmf_require_stamps"]: + # self.lxmf_destination.set_default_app_data(self.get_display_name_bytes) self.rns_dir = RNS.Reticulum.configdir @@ -3428,19 +3804,31 @@ class SidebandCore(): else: self.set_active_propagation_node(None) + def update_ignore_invalid_stamps(self): + if self.config["lxmf_ignore_invalid_stamps"]: + self.message_router.enforce_stamps() + else: + self.message_router.ignore_stamps() + def message_notification_no_display(self, message): self.message_notification(message, no_display=True) def message_notification(self, message, no_display=False): if message.state == LXMF.LXMessage.FAILED and hasattr(message, "try_propagation_on_fail") and message.try_propagation_on_fail: - RNS.log("Direct delivery of "+str(message)+" failed. Retrying as propagated message.", RNS.LOG_VERBOSE) - message.try_propagation_on_fail = None - message.delivery_attempts = 0 - del message.next_delivery_attempt - message.packed = None - message.desired_method = LXMF.LXMessage.PROPAGATED - self._db_message_set_method(message.hash, LXMF.LXMessage.PROPAGATED) - self.message_router.handle_outbound(message) + if hasattr(message, "stamp_generation_failed") and message.stamp_generation_failed == True: + RNS.log(f"Could not send {message} due to a stamp generation failure", RNS.LOG_ERROR) + if not no_display: + self.lxm_ingest(message, originator=True) + else: + RNS.log("Direct delivery of "+str(message)+" failed. Retrying as propagated message.", RNS.LOG_VERBOSE) + message.try_propagation_on_fail = None + message.delivery_attempts = 0 + if hasattr(message, "next_delivery_attempt"): + del message.next_delivery_attempt + message.packed = None + message.desired_method = LXMF.LXMessage.PROPAGATED + self._db_message_set_method(message.hash, LXMF.LXMessage.PROPAGATED) + self.message_router.handle_outbound(message) else: if not no_display: self.lxm_ingest(message, originator=True) @@ -3450,7 +3838,7 @@ class SidebandCore(): try: telemeter = Telemeter.from_packed(message.fields[LXMF.FIELD_TELEMETRY]) telemetry_timebase = telemeter.read_all()["time"]["utc"] - RNS.log("Setting last successul telemetry timebase for "+RNS.prettyhexrep(message.destination_hash)+" to "+str(telemetry_timebase)) + RNS.log("Setting last successul telemetry timebase for "+RNS.prettyhexrep(message.destination_hash)+" to "+str(telemetry_timebase), RNS.LOG_DEBUG) self.setpersistent(f"telemetry.{RNS.hexrep(message.destination_hash, delimit=False)}.last_send_success_timebase", telemetry_timebase) except Exception as e: RNS.log("Error while setting last successul telemetry timebase for "+RNS.prettyhexrep(message.destination_hash), RNS.LOG_DEBUG) @@ -3463,10 +3851,11 @@ class SidebandCore(): if send_telemetry and self.latest_packed_telemetry != None: telemeter = Telemeter.from_packed(self.latest_packed_telemetry) telemetry_timebase = telemeter.read_all()["time"]["utc"] - if telemetry_timebase > (self.getpersistent(f"telemetry.{RNS.hexrep(context_dest, delimit=False)}.last_send_success_timebase") or 0): + last_success_tb = (self.getpersistent(f"telemetry.{RNS.hexrep(context_dest, delimit=False)}.last_send_success_timebase") or 0) + if telemetry_timebase > last_success_tb: RNS.log("Embedding own telemetry in message since current telemetry is newer than latest successful timebase", RNS.LOG_DEBUG) else: - RNS.log("Not embedding own telemetry in message since current telemetry is not newer than latest successful timebase", RNS.LOG_DEBUG) + RNS.log("Not embedding own telemetry in message since current telemetry timebase 
("+str(telemetry_timebase)+") is not newer than latest successful timebase ("+str(last_success_tb)+")", RNS.LOG_DEBUG) send_telemetry = False send_appearance = False if signal_already_sent: @@ -3505,7 +3894,8 @@ class SidebandCore(): source = self.lxmf_destination desired_method = LXMF.LXMessage.PAPER - lxm = LXMF.LXMessage(dest, source, content, title="", desired_method=desired_method, fields = self.get_message_fields(destination_hash)) + # TODO: Should paper messages also include a ticket to trusted peers? + lxm = LXMF.LXMessage(dest, source, content, title="", desired_method=desired_method, fields = self.get_message_fields(destination_hash), include_ticket=self.is_trusted(destination_hash)) self.lxm_ingest(lxm, originator=True) @@ -3522,6 +3912,13 @@ class SidebandCore(): RNS.log("An error occurred while getting message transfer progress: "+str(e), RNS.LOG_ERROR) return None + def get_lxm_stamp_cost(self, lxm_hash): + try: + return self.message_router.get_outbound_lxm_stamp_cost(lxm_hash) + except Exception as e: + RNS.log("An error occurred while getting message transfer stamp cost: "+str(e), RNS.LOG_ERROR) + return None + def send_message(self, content, destination_hash, propagation, skip_fields=False, no_display=False, attachment = None, image = None, audio = None): try: if content == "": @@ -3545,8 +3942,10 @@ class SidebandCore(): fields[LXMF.FIELD_FILE_ATTACHMENTS] = [attachment] if image != None: fields[LXMF.FIELD_IMAGE] = image + if audio != None: + fields[LXMF.FIELD_AUDIO] = audio - lxm = LXMF.LXMessage(dest, source, content, title="", desired_method=desired_method, fields = fields) + lxm = LXMF.LXMessage(dest, source, content, title="", desired_method=desired_method, fields = fields, include_ticket=self.is_trusted(destination_hash)) if not no_display: lxm.register_delivery_callback(self.message_notification) @@ -3560,6 +3959,7 @@ class SidebandCore(): lxm.try_propagation_on_fail = True self.message_router.handle_outbound(lxm) + if not no_display: self.lxm_ingest(lxm, originator=True) @@ -3567,6 +3967,7 @@ class SidebandCore(): except Exception as e: RNS.log("Error while sending message: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) return False def send_command(self, content, destination_hash, propagation): @@ -3598,7 +3999,7 @@ class SidebandCore(): else: desired_method = LXMF.LXMessage.DIRECT - lxm = LXMF.LXMessage(dest, source, "", title="", desired_method=desired_method, fields = {LXMF.FIELD_COMMANDS: commands}) + lxm = LXMF.LXMessage(dest, source, "", title="", desired_method=desired_method, fields = {LXMF.FIELD_COMMANDS: commands}, include_ticket=self.is_trusted(destination_hash)) lxm.register_delivery_callback(self.message_notification) lxm.register_failed_callback(self.message_notification) @@ -3666,6 +4067,7 @@ class SidebandCore(): def lxm_ingest(self, message, originator = False): should_notify = False is_trusted = False + ptt_enabled = False telemetry_only = False own_command = False unread_reason_tx = False @@ -3676,13 +4078,17 @@ class SidebandCore(): else: context_dest = message.source_hash is_trusted = self.is_trusted(context_dest) + ptt_enabled = self.ptt_enabled(context_dest) if originator and LXMF.FIELD_COMMANDS in message.fields: own_command = True if self._db_message(message.hash): RNS.log("Message exists, setting state to: "+str(message.state), RNS.LOG_DEBUG) - self._db_message_set_state(message.hash, message.state) + stamp = None + if originator and message.stamp != None: + stamp = [message.stamp, message.stamp_valid, message.stamp_value] + 
self._db_message_set_state(message.hash, message.state, ratchet_id=message.ratchet_id, originator_stamp=stamp) else: RNS.log("Message does not exist, saving", RNS.LOG_DEBUG) self._db_save_lxm(message, context_dest, originator, own_command=own_command) @@ -3695,6 +4101,11 @@ class SidebandCore(): RNS.log("Squelching notification due to telemetry-only message", RNS.LOG_DEBUG) telemetry_only = True + if LXMF.FIELD_TICKET in message.fields: + if self.is_service: + RNS.log("Notifying UI of newly arrived delivery ticket", RNS.LOG_DEBUG) + self.setstate("app.flags.new_ticket", True) + if not telemetry_only: if self._db_conversation(context_dest) == None: self._db_create_conversation(context_dest) @@ -3718,6 +4129,10 @@ class SidebandCore(): if self.gui_display() == "conversations_screen" and self.gui_foreground(): should_notify = False + if not originator and LXMF.FIELD_AUDIO in message.fields and ptt_enabled: + self.ptt_event(message) + should_notify = False + if self.is_client: should_notify = False @@ -3729,9 +4144,107 @@ class SidebandCore(): text = message.content.decode("utf-8") notification_content = text[:nlen] if len(text) > nlen: - text += "..." + notification_content += " [...]" - self.notify(title=self.peer_display_name(context_dest), content=notification_content, group="LXM", context_id=RNS.hexrep(context_dest, delimit=False)) + if len(text) < 2 and LXMF.FIELD_AUDIO in message.fields: + notification_content = "Audio message" + if len(text) < 2 and LXMF.FIELD_IMAGE in message.fields: + notification_content = "Image" + if len(text) < 2 and LXMF.FIELD_FILE_ATTACHMENTS in message.fields: + notification_content = "File attachment" + + try: + self.notify(title=self.peer_display_name(context_dest), content=notification_content, group="LXM", context_id=RNS.hexrep(context_dest, delimit=False)) + except Exception as e: + RNS.log("Could not post notification for received message: "+str(e), RNS.LOG_ERROR) + + def ptt_playback(self, message): + ptt_timeout = 60 + event_time = time.time() + while hasattr(self, "msg_sound") and self.msg_sound != None and self.msg_sound.playing() and time.time() < event_time+ptt_timeout: + time.sleep(0.1) + time.sleep(0.5) + + if self.msg_audio == None: + if RNS.vendor.platformutils.is_android(): + from plyer import audio + else: + from sbapp.plyer import audio + + RNS.log("Audio init done") + self.msg_audio = audio + try: + temp_path = None + audio_field = message.fields[LXMF.FIELD_AUDIO] + if self.last_msg_audio != audio_field[1]: + RNS.log("Reloading audio source", RNS.LOG_DEBUG) + if len(audio_field[1]) > 10: + self.last_msg_audio = audio_field[1] + else: + self.last_msg_audio = None + return + + if audio_field[0] == LXMF.AM_OPUS_OGG: + temp_path = self.rec_cache+"/msg.ogg" + with open(temp_path, "wb") as af: + af.write(self.last_msg_audio) + + elif audio_field[0] >= LXMF.AM_CODEC2_700C and audio_field[0] <= LXMF.AM_CODEC2_3200: + temp_path = self.rec_cache+"/msg.ogg" + from sideband.audioproc import samples_to_ogg, decode_codec2, detect_codec2 + + target_rate = 8000 + if RNS.vendor.platformutils.is_linux(): + target_rate = 48000 + + if detect_codec2(): + if samples_to_ogg(decode_codec2(audio_field[1], audio_field[0]), temp_path, input_rate=8000, output_rate=target_rate): + RNS.log("Wrote OGG file to: "+temp_path, RNS.LOG_DEBUG) + else: + RNS.log("OGG write failed", RNS.LOG_DEBUG) + else: + self.last_msg_audio = None + return + + else: + # Unimplemented audio type + pass + + self.msg_sound = self.msg_audio + self.msg_sound._file_path = temp_path + 
self.msg_sound.reload() + + if self.msg_sound != None: + RNS.log("Starting playback", RNS.LOG_DEBUG) + self.msg_sound.play() + else: + RNS.log("Playback was requested, but no audio data was loaded for playback", RNS.LOG_ERROR) + + except Exception as e: + RNS.log("Error while playing message audio:"+str(e)) + RNS.trace_exception(e) + + def ptt_event(self, message): + def ptt_job(): + try: + self.ptt_playback_lock.acquire() + while self.ui_recording: + time.sleep(0.5) + self.ptt_playback(message) + except Exception as e: + RNS.log("Error while starting playback for PTT-enabled conversation: "+str(e), RNS.LOG_ERROR) + finally: + self.ptt_playback_lock.release() + + threading.Thread(target=ptt_job, daemon=True).start() + + def ui_started_recording(self): + self.ui_recording = True + self.service_rpc_set_ui_recording(True) + + def ui_stopped_recording(self): + self.ui_recording = False + self.service_rpc_set_ui_recording(False) def start(self): self._db_clean_messages() @@ -3742,7 +4255,7 @@ class SidebandCore(): thread.start() self.setstate("core.started", True) - RNS.log("Sideband Core "+str(self)+" started") + RNS.log("Sideband Core "+str(self)+" version "+str(self.version_str)+" started") def stop_webshare(self): if self.webshare_server != None: diff --git a/sbapp/sideband/sense.py b/sbapp/sideband/sense.py index ff7f1ae..aa1c3fa 100644 --- a/sbapp/sideband/sense.py +++ b/sbapp/sideband/sense.py @@ -53,6 +53,7 @@ class Telemeter(): Sensor.SID_PROXIMITY: Proximity, Sensor.SID_POWER_CONSUMPTION: PowerConsumption, Sensor.SID_POWER_PRODUCTION: PowerProduction, Sensor.SID_PROCESSOR: Processor, Sensor.SID_RAM: RandomAccessMemory, Sensor.SID_NVM: NonVolatileMemory, + Sensor.SID_CUSTOM: Custom, Sensor.SID_TANK: Tank, Sensor.SID_FUEL: Fuel, } self.available = { "time": Sensor.SID_TIME, @@ -65,6 +66,7 @@ class Telemeter(): "acceleration": Sensor.SID_ACCELERATION, "proximity": Sensor.SID_PROXIMITY, "power_consumption": Sensor.SID_POWER_CONSUMPTION, "power_production": Sensor.SID_POWER_PRODUCTION, "processor": Sensor.SID_PROCESSOR, "ram": Sensor.SID_RAM, "nvm": Sensor.SID_NVM, + "custom": Sensor.SID_CUSTOM, "tank": Sensor.SID_TANK, "fuel": Sensor.SID_FUEL } self.from_packed = from_packed self.sensors = {} @@ -191,6 +193,9 @@ class Sensor(): SID_PROCESSOR = 0x13 SID_RAM = 0x14 SID_NVM = 0x15 + SID_TANK = 0x16 + SID_FUEL = 0x17 + SID_CUSTOM = 0xff def __init__(self, sid = None, stale_time = None): self._telemeter = None @@ -461,7 +466,7 @@ class Battery(Sensor): if RNS.vendor.platformutils.is_android(): self.battery.get_state() b = self.battery.status - self.data = {"charge_percent": b["percentage"], "charging": b["isCharging"]} + self.data = {"charge_percent": b["percentage"], "charging": b["isCharging"], "temperature": None} elif RNS.vendor.platformutils.is_linux(): if self.battery_node_name: @@ -480,7 +485,7 @@ class Battery(Sensor): is_charging = output['POWER_SUPPLY_STATUS'] == 'Charging' charge_percent = float(output['POWER_SUPPLY_CAPACITY']) - self.data = {"charge_percent": round(charge_percent, 1), "charging": is_charging} + self.data = {"charge_percent": round(charge_percent, 1), "charging": is_charging, "temperature": None} except: self.data = None @@ -490,14 +495,20 @@ class Battery(Sensor): if d == None: return None else: - return [round(d["charge_percent"],1), d["charging"]] + return [round(d["charge_percent"],1), d["charging"], d["temperature"]] def unpack(self, packed): try: if packed == None: return None else: - return {"charge_percent": round(packed[0], 1), "charging": packed[1]} + 
unpacked = {"charge_percent": round(packed[0], 1), "charging": packed[1]} + if len(packed) > 2: + unpacked["temperature"] = packed[2] + else: + unpacked["temperature"] = None + + return unpacked except: return None @@ -507,6 +518,7 @@ class Battery(Sensor): d = self.data p = d["charge_percent"] + t = d["temperature"] if d["charging"]: charge_string = "charging" else: @@ -515,7 +527,7 @@ class Battery(Sensor): rendered = { "icon": "battery-outline", "name": "Battery", - "values": {"percent": p, "_meta": charge_string}, + "values": {"percent": p, "temperature": t, "_meta": charge_string}, } if d["charging"]: @@ -1077,6 +1089,17 @@ class MagneticField(Sensor): except: return None + def render(self, relative_to=None): + if self.data == None: + return None + + rendered = { + "icon": "magnet", + "name": "Magnetic Field", + "values": { "x": self.data["x"], "y": self.data["y"], "z": self.data["z"] }, + } + return rendered + class AmbientLight(Sensor): SID = Sensor.SID_AMBIENT_LIGHT STALE_TIME = 1 @@ -1189,6 +1212,17 @@ class Gravity(Sensor): except: return None + def render(self, relative_to=None): + if self.data == None: + return None + + rendered = { + "icon": "arrow-down-thin-circle-outline", + "name": "Gravity", + "values": { "x": self.data["x"], "y": self.data["y"], "z": self.data["z"] }, + } + return rendered + class AngularVelocity(Sensor): SID = Sensor.SID_ANGULAR_VELOCITY STALE_TIME = 1 @@ -1235,6 +1269,17 @@ class AngularVelocity(Sensor): except: return None + def render(self, relative_to=None): + if self.data == None: + return None + + rendered = { + "icon": "orbit", + "name": "Angular Velocity", + "values": { "x": self.data["x"], "y": self.data["y"], "z": self.data["z"] }, + } + return rendered + class Acceleration(Sensor): SID = Sensor.SID_ACCELERATION STALE_TIME = 1 @@ -1326,6 +1371,17 @@ class Proximity(Sensor): except: return None + def render(self, relative_to=None): + if self.data == None: + return None + + rendered = { + "icon": "signal-distance-variant", + "name": "Proximity", + "values": { "triggered": self.data }, + } + return rendered + class PowerConsumption(Sensor): SID = Sensor.SID_POWER_CONSUMPTION STALE_TIME = 5 @@ -1339,7 +1395,7 @@ class PowerConsumption(Sensor): def teardown_sensor(self): self.data = None - def update_consumer(self, power, type_label=None): + def update_consumer(self, power, type_label=None, custom_icon=None): if type_label == None: type_label = 0x00 elif type(type_label) != str: @@ -1348,7 +1404,7 @@ class PowerConsumption(Sensor): if self.data == None: self.data = {} - self.data[type_label] = power + self.data[type_label] = [power, custom_icon] return True def remove_consumer(self, type_label=None): @@ -1397,7 +1453,7 @@ class PowerConsumption(Sensor): label = "Power consumption" else: label = type_label - consumers.append({"label": label, "w": self.data[type_label]}) + consumers.append({"label": label, "w": self.data[type_label][0], "custom_icon": self.data[type_label][1]}) rendered = { "icon": "power-plug-outline", @@ -1420,7 +1476,7 @@ class PowerProduction(Sensor): def teardown_sensor(self): self.data = None - def update_producer(self, power, type_label=None): + def update_producer(self, power, type_label=None, custom_icon=None): if type_label == None: type_label = 0x00 elif type(type_label) != str: @@ -1429,7 +1485,7 @@ class PowerProduction(Sensor): if self.data == None: self.data = {} - self.data[type_label] = power + self.data[type_label] = [power, custom_icon] return True def remove_producer(self, type_label=None): @@ -1478,7 +1534,7 @@ 
class PowerProduction(Sensor): label = "Power Production" else: label = type_label - producers.append({"label": label, "w": self.data[type_label]}) + producers.append({"label": label, "w": self.data[type_label][0], "custom_icon": self.data[type_label][1]}) rendered = { "icon": "lightning-bolt", @@ -1488,12 +1544,534 @@ class PowerProduction(Sensor): return rendered -# TODO: Implement class Processor(Sensor): - pass + SID = Sensor.SID_PROCESSOR + STALE_TIME = 5 + + def __init__(self): + super().__init__(type(self).SID, type(self).STALE_TIME) + + def setup_sensor(self): + self.update_data() + + def teardown_sensor(self): + self.data = None + + def update_entry(self, current_load=0, load_avgs=None, clock=None, type_label=None): + if type_label == None: + type_label = 0x00 + elif type(type_label) != str: + return False + + if self.data == None: + self.data = {} + + self.data[type_label] = [current_load, load_avgs, clock] + return True + + def remove_entry(self, type_label=None): + if type_label == None: + type_label = 0x00 + + if type_label in self.data: + self.data.pop(type_label) + return True + + return False + + def update_data(self): + pass + + def pack(self): + d = self.data + if d == None: + return None + else: + packed = [] + for type_label in self.data: + packed.append([type_label, self.data[type_label]]) + return packed + + def unpack(self, packed): + try: + if packed == None: + return None + else: + unpacked = {} + for entry in packed: + unpacked[entry[0]] = entry[1] + return unpacked + + except: + return None + + def render(self, relative_to=None): + if self.data == None: + return None + + entries = [] + for type_label in self.data: + if type_label == 0x00: + label = "Processor" + else: + label = type_label + entries.append({ + "label": label, + "current_load": self.data[type_label][0], + "load_avgs": self.data[type_label][1], + "clock": self.data[type_label][2], + }) + + rendered = { + "icon": "chip", + "name": "Processor", + "values": entries, + } + + return rendered class RandomAccessMemory(Sensor): - pass + SID = Sensor.SID_RAM + STALE_TIME = 5 + + def __init__(self): + super().__init__(type(self).SID, type(self).STALE_TIME) + + def setup_sensor(self): + self.update_data() + + def teardown_sensor(self): + self.data = None + + def update_entry(self, capacity=0, used=0, type_label=None): + if type_label == None: + type_label = 0x00 + elif type(type_label) != str: + return False + + if self.data == None: + self.data = {} + + self.data[type_label] = [capacity, used] + return True + + def remove_entry(self, type_label=None): + if type_label == None: + type_label = 0x00 + + if type_label in self.data: + self.data.pop(type_label) + return True + + return False + + def update_data(self): + pass + + def pack(self): + d = self.data + if d == None: + return None + else: + packed = [] + for type_label in self.data: + packed.append([type_label, self.data[type_label]]) + return packed + + def unpack(self, packed): + try: + if packed == None: + return None + else: + unpacked = {} + for entry in packed: + unpacked[entry[0]] = entry[1] + return unpacked + + except: + return None + + def render(self, relative_to=None): + if self.data == None: + return None + + entries = [] + for type_label in self.data: + if type_label == 0x00: + label = "Memory" + else: + label = type_label + entries.append({ + "label": label, + "capacity": self.data[type_label][0], + "used": self.data[type_label][1], + "free": self.data[type_label][0]-self.data[type_label][1], + "percent": 
(self.data[type_label][1]/self.data[type_label][0])*100, + }) + + rendered = { + "icon": "memory", + "name": "Random Access Memory", + "values": entries, + } + + return rendered class NonVolatileMemory(Sensor): - pass \ No newline at end of file + SID = Sensor.SID_NVM + STALE_TIME = 5 + + def __init__(self): + super().__init__(type(self).SID, type(self).STALE_TIME) + + def setup_sensor(self): + self.update_data() + + def teardown_sensor(self): + self.data = None + + def update_entry(self, capacity=0, used=0, type_label=None): + if type_label == None: + type_label = 0x00 + elif type(type_label) != str: + return False + + if self.data == None: + self.data = {} + + self.data[type_label] = [capacity, used] + return True + + def remove_entry(self, type_label=None): + if type_label == None: + type_label = 0x00 + + if type_label in self.data: + self.data.pop(type_label) + return True + + return False + + def update_data(self): + pass + + def pack(self): + d = self.data + if d == None: + return None + else: + packed = [] + for type_label in self.data: + packed.append([type_label, self.data[type_label]]) + return packed + + def unpack(self, packed): + try: + if packed == None: + return None + else: + unpacked = {} + for entry in packed: + unpacked[entry[0]] = entry[1] + return unpacked + + except: + return None + + def render(self, relative_to=None): + if self.data == None: + return None + + entries = [] + for type_label in self.data: + if type_label == 0x00: + label = "Storage" + else: + label = type_label + entries.append({ + "label": label, + "capacity": self.data[type_label][0], + "used": self.data[type_label][1], + "free": self.data[type_label][0]-self.data[type_label][1], + "percent": (self.data[type_label][1]/self.data[type_label][0])*100, + }) + + rendered = { + "icon": "harddisk", + "name": "Non-Volatile Memory", + "values": entries, + } + + return rendered + +class Custom(Sensor): + SID = Sensor.SID_CUSTOM + STALE_TIME = 5 + + def __init__(self): + super().__init__(type(self).SID, type(self).STALE_TIME) + + def setup_sensor(self): + self.update_data() + + def teardown_sensor(self): + self.data = None + + def update_entry(self, value=None, type_label=None, custom_icon=None): + if type_label == None: + type_label = 0x00 + elif type(type_label) != str: + return False + + if self.data == None: + self.data = {} + + self.data[type_label] = [value, custom_icon] + return True + + def remove_entry(self, type_label=None): + if type_label == None: + type_label = 0x00 + + if type_label in self.data: + self.data.pop(type_label) + return True + + return False + + def update_data(self): + pass + + def pack(self): + d = self.data + if d == None: + return None + else: + packed = [] + for type_label in self.data: + packed.append([type_label, self.data[type_label]]) + return packed + + def unpack(self, packed): + try: + if packed == None: + return None + else: + unpacked = {} + for entry in packed: + unpacked[entry[0]] = entry[1] + return unpacked + + except: + return None + + def render(self, relative_to=None): + if self.data == None: + return None + + entries = [] + for type_label in self.data: + if type_label == 0x00: + label = "Custom" + else: + label = type_label + entries.append({ + "label": label, + "value": self.data[type_label][0], + "custom_icon": self.data[type_label][1], + }) + + rendered = { + "icon": "ruler", + "name": "Custom", + "values": entries, + } + + return rendered + + +class Tank(Sensor): + SID = Sensor.SID_TANK + STALE_TIME = 5 + + def __init__(self): + 
super().__init__(type(self).SID, type(self).STALE_TIME) + + def setup_sensor(self): + self.update_data() + + def teardown_sensor(self): + self.data = None + + def update_entry(self, capacity=0, level=0, unit=None, type_label=None, custom_icon=None): + if type_label == None: + type_label = 0x00 + elif type(type_label) != str: + return False + + if unit != None and type(unit) != str: + return False + + if self.data == None: + self.data = {} + + self.data[type_label] = [capacity, level, unit, custom_icon] + return True + + def remove_entry(self, type_label=None): + if type_label == None: + type_label = 0x00 + + if type_label in self.data: + self.data.pop(type_label) + return True + + return False + + def update_data(self): + pass + + def pack(self): + d = self.data + if d == None: + return None + else: + packed = [] + for type_label in self.data: + packed.append([type_label, self.data[type_label]]) + return packed + + def unpack(self, packed): + try: + if packed == None: + return None + else: + unpacked = {} + for entry in packed: + unpacked[entry[0]] = entry[1] + return unpacked + + except: + return None + + def render(self, relative_to=None): + if self.data == None: + return None + + entries = [] + for type_label in self.data: + if type_label == 0x00: + label = "Tank" + else: + label = type_label + set_unit = self.data[type_label][2] if self.data[type_label][2] != None else "L" + entries.append({ + "label": label, + "unit": set_unit, + "capacity": self.data[type_label][0], + "level": self.data[type_label][1], + "free": self.data[type_label][0]-self.data[type_label][1], + "percent": (self.data[type_label][1]/self.data[type_label][0])*100, + "custom_icon": self.data[type_label][3], + }) + + rendered = { + "icon": "storage-tank", + "name": "Tank", + "values": entries, + } + + return rendered + +class Fuel(Sensor): + SID = Sensor.SID_FUEL + STALE_TIME = 5 + + def __init__(self): + super().__init__(type(self).SID, type(self).STALE_TIME) + + def setup_sensor(self): + self.update_data() + + def teardown_sensor(self): + self.data = None + + def update_entry(self, capacity=0, level=0, unit=None, type_label=None, custom_icon=None): + if type_label == None: + type_label = 0x00 + elif type(type_label) != str: + return False + + if unit != None and type(unit) != str: + return False + + if self.data == None: + self.data = {} + + self.data[type_label] = [capacity, level, unit, custom_icon] + return True + + def remove_entry(self, type_label=None): + if type_label == None: + type_label = 0x00 + + if type_label in self.data: + self.data.pop(type_label) + return True + + return False + + def update_data(self): + pass + + def pack(self): + d = self.data + if d == None: + return None + else: + packed = [] + for type_label in self.data: + packed.append([type_label, self.data[type_label]]) + return packed + + def unpack(self, packed): + try: + if packed == None: + return None + else: + unpacked = {} + for entry in packed: + unpacked[entry[0]] = entry[1] + return unpacked + + except: + return None + + def render(self, relative_to=None): + if self.data == None: + return None + + entries = [] + for type_label in self.data: + if type_label == 0x00: + label = "Fuel" + else: + label = type_label + set_unit = self.data[type_label][2] if self.data[type_label][2] != None else "L" + entries.append({ + "label": label, + "unit": set_unit, + "capacity": self.data[type_label][0], + "level": self.data[type_label][1], + "free": self.data[type_label][0]-self.data[type_label][1], + "percent": 
(self.data[type_label][1]/self.data[type_label][0])*100, + "custom_icon": self.data[type_label][3], + }) + + rendered = { + "icon": "fuel", + "name": "Fuel", + "values": entries, + } + + return rendered \ No newline at end of file diff --git a/sbapp/ui/conversations.py b/sbapp/ui/conversations.py index 6f7f207..4427af3 100644 --- a/sbapp/ui/conversations.py +++ b/sbapp/ui/conversations.py @@ -35,6 +35,8 @@ class ConvSettings(BoxLayout): trusted = BooleanProperty() telemetry = BooleanProperty() allow_requests = BooleanProperty() + is_object = BooleanProperty() + ptt_enabled = BooleanProperty() class Conversations(): def __init__(self, app): @@ -72,7 +74,16 @@ class Conversations(): # if self.app.sideband.getstate("app.flags.unread_conversations"): # self.clear_list() - self.context_dests = self.app.sideband.list_conversations() + self.context_dests = self.app.sideband.list_conversations(conversations=self.app.include_conversations, objects=self.app.include_objects) + + view_title = "Conversations" + if self.app.include_conversations: + if self.app.include_objects: + view_title = "Conversations & Objects" + elif self.app.include_objects: + view_title = "Objects" + self.screen.ids.conversations_bar.title = view_title + self.update_widget() self.app.sideband.setstate("app.flags.unread_conversations", False) @@ -83,12 +94,12 @@ class Conversations(): context_dest = conv["dest"] unread = conv["unread"] appearance = self.app.sideband.peer_appearance(context_dest, conv=conv) - # is_trusted = self.app.sideband.is_trusted(context_dest) is_trusted = conv["trust"] == 1 + appearance_from_all = self.app.sideband.config["display_style_from_all"] trust_icon = "account-question" da = self.app.sideband.DEFAULT_APPEARANCE - if is_trusted and self.app.sideband.config["display_style_in_contact_list"] and appearance != None and appearance != da: + if (is_trusted or appearance_from_all) and self.app.sideband.config["display_style_in_contact_list"] and appearance != None and appearance != da: if unread: trust_icon = "email" else: @@ -123,6 +134,7 @@ class Conversations(): last_activity = conv["last_activity"] trusted = conv["trust"] == 1 appearance = self.app.sideband.peer_appearance(context_dest, conv=conv) + is_object = self.app.sideband.is_object(context_dest, conv_data=conv) da = self.app.sideband.DEFAULT_APPEARANCE ic_s = 24; ic_p = 14 @@ -135,10 +147,21 @@ class Conversations(): else: ti_color = None + if is_object: + def gen_rel_func(): + def x(ws): + self.app.object_details_action(sender=ws, from_objects=True) + return x + + rel_func = gen_rel_func() + else: + rel_func = self.app.conversation_action + iconl = IconLeftWidget( icon=conv_icon, theme_icon_color=ti_color, icon_color=fg, md_bg_color=bg, - on_release=self.app.conversation_action) + on_release=rel_func) + iconl.source_dest = context_dest iconl._default_icon_pad = dp(ic_p) iconl.icon_size = dp(ic_s) @@ -154,12 +177,10 @@ class Conversations(): remove_widgets = [] for w in self.list.children: if not w.sb_uid in [e["dest"] for e in self.context_dests]: - RNS.log("Should remove "+RNS.prettyhexrep(w.sb_uid)+" from list") remove_widgets.append(w) self.added_item_dests.remove(w.sb_uid) for w in remove_widgets: - RNS.log("Removing "+str(w)) self.list.remove_widget(w) @@ -169,7 +190,10 @@ class Conversations(): last_activity = conv["last_activity"] peer_disp_name = multilingual_markup(escape_markup(str(self.app.sideband.peer_display_name(context_dest))).encode("utf-8")).decode("utf-8") - if not context_dest in self.added_item_dests: + if not context_dest 
in self.added_item_dests: + existing_conv = self.app.sideband._db_conversation(context_dest) + is_object = self.app.sideband.is_object(context_dest, conv_data=existing_conv) + ptt_enabled = self.app.sideband.ptt_enabled(context_dest, conv_data=existing_conv) iconl = self.get_icon(conv) item = OneLineAvatarIconListItem(text=peer_disp_name, on_release=self.app.conversation_action) item.add_widget(iconl) @@ -184,18 +208,19 @@ class Conversations(): t_s = time.time() dest = self.conversation_dropdown.context_dest try: + cd = self.app.sideband._db_conversation(dest) disp_name = self.app.sideband.raw_display_name(dest) - is_trusted = self.app.sideband.is_trusted(dest) - send_telemetry = self.app.sideband.should_send_telemetry(dest) - allow_requests = self.app.sideband.requests_allowed_from(dest) + is_trusted = self.app.sideband.is_trusted(dest, conv_data=cd) + is_object = self.app.sideband.is_object(dest, conv_data=cd) + ptt_enabled = self.app.sideband.ptt_enabled(dest, conv_data=cd) + send_telemetry = self.app.sideband.should_send_telemetry(dest, conv_data=cd) + allow_requests = self.app.sideband.requests_allowed_from(dest, conv_data=cd) yes_button = MDRectangleFlatButton(text="Save",font_size=dp(18), theme_text_color="Custom", line_color=self.app.color_accept, text_color=self.app.color_accept) no_button = MDRectangleFlatButton(text="Cancel",font_size=dp(18)) - dialog_content = ConvSettings(disp_name=disp_name, context_dest=RNS.hexrep(dest, delimit=False), trusted=is_trusted, telemetry=send_telemetry, allow_requests=allow_requests) - if self.app.sideband.config["input_language"] != None: - dialog_content.ids.name_field.font_name = self.app.sideband.config["input_language"] - else: - dialog_content.ids.name_field.font_name = "" + dialog_content = ConvSettings(disp_name=disp_name, context_dest=RNS.hexrep(dest, delimit=False), trusted=is_trusted, + telemetry=send_telemetry, allow_requests=allow_requests, is_object=is_object, ptt_enabled=ptt_enabled) + dialog_content.ids.name_field.font_name = self.app.input_font dialog = MDDialog( title="Edit Conversation", @@ -212,6 +237,8 @@ class Conversations(): trusted = dialog.d_content.ids["trusted_switch"].active telemetry = dialog.d_content.ids["telemetry_switch"].active allow_requests = dialog.d_content.ids["allow_requests_switch"].active + conv_is_object = dialog.d_content.ids["is_object_switch"].active + ptt_is_enabled = dialog.d_content.ids["ptt_enabled_switch"].active if trusted: self.app.sideband.trusted_conversation(dest) else: @@ -227,6 +254,18 @@ class Conversations(): else: self.app.sideband.disallow_requests_from(dest) + if conv_is_object: + self.app.sideband.conversation_set_object(dest, True) + else: + self.app.sideband.conversation_set_object(dest, False) + + if ptt_is_enabled: + RNS.log("Setting PTT enabled") + self.app.sideband.conversation_set_ptt_enabled(dest, True) + else: + RNS.log("Setting PTT disabled") + self.app.sideband.conversation_set_ptt_enabled(dest, False) + self.app.sideband.named_conversation(name, dest) except Exception as e: @@ -326,6 +365,13 @@ class Conversations(): self.delete_dialog.open() return x + # def gen_move_to(item): + # def x(): + # item.dmenu.dismiss() + # self.app.sideband.conversation_set_object(self.conversation_dropdown.context_dest, not self.app.sideband.is_object(self.conversation_dropdown.context_dest)) + # self.app.conversations_view.update() + # return x + def gen_copy_addr(item): def x(): Clipboard.copy(RNS.hexrep(self.conversation_dropdown.context_dest, delimit=False)) @@ -335,6 +381,7 @@ class 
Conversations(): item.iconr = IconRightWidget(icon="dots-vertical"); if self.conversation_dropdown == None: + obj_str = "conversations" if is_object else "objects" dmi_h = 40 dm_items = [ { @@ -349,6 +396,12 @@ class Conversations(): "height": dp(dmi_h), "on_release": gen_copy_addr(item) }, + # { + # "text": "Move to objects", + # "viewclass": "OneLineListItem", + # "height": dp(dmi_h), + # "on_release": gen_move_to(item) + # }, { "text": "Clear Messages", "viewclass": "OneLineListItem", @@ -393,7 +446,7 @@ class Conversations(): item.add_widget(item.iconr) - item.trusted = self.app.sideband.is_trusted(context_dest) + item.trusted = self.app.sideband.is_trusted(context_dest, conv_data=existing_conv) self.added_item_dests.append(context_dest) self.list.add_widget(item) @@ -440,6 +493,7 @@ MDScreen: MDTopAppBar: title: "Conversations" + id: conversations_bar anchor_title: "left" elevation: 0 left_action_items: @@ -561,6 +615,36 @@ Builder.load_string(""" pos_hint: {"center_y": 0.43} active: root.allow_requests + MDBoxLayout: + orientation: "horizontal" + size_hint_y: None + padding: [0,0,dp(8),0] + height: dp(32) + MDLabel: + id: ptt_enabled_label + text: "PTT Enabled" + font_style: "H6" + + MDSwitch: + id: ptt_enabled_switch + pos_hint: {"center_y": 0.43} + active: root.ptt_enabled + + MDBoxLayout: + orientation: "horizontal" + size_hint_y: None + padding: [0,0,dp(8),0] + height: dp(32) + MDLabel: + id: is_object_label + text: "Is Object" + font_style: "H6" + + MDSwitch: + id: is_object_switch + pos_hint: {"center_y": 0.43} + active: root.is_object + orientation: "vertical" spacing: "24dp" diff --git a/sbapp/ui/helpers.py b/sbapp/ui/helpers.py index f6a5f12..68f73d4 100644 --- a/sbapp/ui/helpers.py +++ b/sbapp/ui/helpers.py @@ -13,6 +13,7 @@ def mdc(color, hue=None): hue = "400" return get_color_from_hex(colors[color][hue]) +color_playing = "Amber" color_received = "LightGreen" color_delivered = "Blue" color_paper = "Indigo" @@ -21,6 +22,8 @@ color_failed = "Red" color_unknown = "Gray" intensity_msgs_dark = "800" intensity_msgs_light = "500" +intensity_play_dark = "600" +intensity_play_light = "300" class ContentNavigationDrawer(Screen): pass @@ -34,6 +37,13 @@ class IconListItem(OneLineIconListItem): def is_emoji(unicode_character): return unicode_character in emoji_lookup +def strip_emojis(str_input): + output = "" + for cp in str_input: + if not is_emoji(cp): + output += cp + return output + def multilingual_markup(data): # TODO: Remove # import time @@ -117,7 +127,115 @@ codepoint_map = { 0xa960: [0xa97f, "korean"], 0xac00: [0xd7af, "korean"], 0xd7b0: [0xd7ff, "korean"], - 0x0900: [0x097f, "combined"], # Devangari + 0x0900: [0x097f, "combined"], # Devanagari } -emoji_lookup = 
['⌚','⌛','','⏪','⏫','⏬','⏰','⏳','◽','◾','☔','☕','♈','♉','♊','♋','♌','♍','♎','♏','♐','♑','♒','♓','♿','⚓','⚡','⚪','⚫','⚽','⚾','⛄','⛅','⛎','⛔','⛪','⛲','⛳','⛵','⛺','⛽','✅','✊','✋','✨','❌','❎','❓','❔','❕','❗','➕','➖','➗','➰','➿','⬛','⬜','⭐','⭕','🀄','🃏','🆎','🆑','🆒','🆓','🆔','🆕','🆖','🆗','🆘','🆙','🆚','🈁','🈚','🈯','🈲','🈳','🈴','🈵','🈶','🈸','🈹','🈺','🉐','🉑','🌀','🌁','🌂','🌃','🌄','🌅','🌆','🌇','🌈','🌉','🌊','🌋','🌌','🌍','🌎','🌏','🌐','🌑','🌒','🌓','🌔','🌕','🌖','🌗','🌘','🌙','🌚','🌛','🌜','🌝','🌞','🌟','🌠','🌭','🌮','🌯','🌰','🌱','🌲','🌳','🌴','🌵','🌷','🌸','🌹','🌺','🌻','🌼','🌽','🌾','🌿','🍀','🍁','🍂','🍃','🍄','🍅','🍆','🍇','🍈','🍉','🍊','🍋','🍌','🍍','🍎','🍏','🍐','🍑','🍒','🍓','🍔','🍕','🍖','🍗','🍘','🍙','🍚','🍛','🍜','🍝','🍞','🍟','🍠','🍡','🍢','🍣','🍤','🍥','🍦','🍧','🍨','🍩','🍪','🍫','🍬','🍭','🍮','🍯','🍰','🍱','🍲','🍳','🍴','🍵','🍶','🍷','🍸','🍹','🍺','🍻','🍼','🍾','🍿','🎀','🎁','🎂','🎃','🎄','🎅','🎆','🎇','🎈','🎉','🎊','🎋','🎌','🎍','🎎','🎏','🎐','🎑','🎒','🎓','🎠','🎡','🎢','🎣','🎤','🎥','🎦','🎧','🎨','🎩','🎪','🎫','🎬','🎭','🎮','🎯','🎰','🎱','🎲','🎳','🎴','🎵','🎶','🎷','🎸','🎹','🎺','🎻','🎼','🎽','🎾','🎿','🏀','🏁','🏂','🏃','🏄','🏅','🏆','🏇','🏈','🏉','🏊','🏏','🏐','🏑','🏒','🏓','🏠','🏡','🏢','🏣','🏤','🏥','🏦','🏧','🏨','🏩','🏪','🏫','🏬','🏭','🏮','🏯','🏰','🏴','🏸','🏹','🏺','🏻','🏼','🏽','🏾','🏿','🐀','🐁','🐂','🐃','🐄','🐅','🐆','🐇','🐈','🐉','🐊','🐋','🐌','🐍','🐎','🐏','🐐','🐑','🐒','🐓','🐔','🐕','🐖','🐗','🐘','🐙','🐚','🐛','🐜','🐝','🐞','🐟','🐠','🐡','🐢','🐣','🐤','🐥','🐦','🐧','🐨','🐩','🐪','🐫','🐬','🐭','🐮','🐯','🐰','🐱','🐲','🐳','🐴','🐵','🐶','🐷','🐸','🐹','🐺','🐻','🐼','🐽','🐾','👀','👂','👃','👄','👅','👆','👇','👈','👉','👊','👋','👌','👍','👎','👏','👐','👑','👒','👓','👔','👕','👖','👗','👘','👙','👚','👛','👜','👝','👞','👟','👠','👡','👢','👣','👤','👥','👦','👧','👨','👩','👪','👫','👬','👭','👮','👯','👰','👱','👲','👳','👴','👵','👶','👷','👸','👹','👺','👻','👼','👽','👾','👿','💀','💁','💂','💃','💄','💅','💆','💇','💈','💉','💊','💋','💌','💍','💎','💏','💐','💑','💒','💓','💔','💕','💖','💗','💘','💙','💚','💛','💜','💝','💞','💟','💠','💡','💢','💣','💤','💥','💦','💧','💨','💩','💪','💫','💬','💭','💮','💯','💰','💱','💲','💳','💴','💵','💶','💷','💸','💹','💺','💻','💼','💽','💾','💿','📀','📁','📂','📃','📄','📅','📆','📇','📈','📉','📊','📋','📌','📍','📎','📏','📐','📑','📒','📓','📔','📕','📖','📗','📘','📙','📚','📛','📜','📝','📞','📟','📠','📡','📢','📣','📤','📥','📦','📧','📨','📩','📪','📫','📬','📭','📮','📯','📰','📱','📲','📳','📴','📵','📶','📷','📸','📹','📺','📻','📼','📿','🔀','🔁','🔂','🔃','🔄','🔅','🔆','🔇','🔈','🔉','🔊','🔋','🔌','🔍','🔎','🔏','🔐','🔑','🔒','🔓','🔔','🔕','🔖','🔗','🔘','🔙','🔚','🔛','🔜','🔝','🔞','🔟','🔠','🔡','🔢','🔣','🔤','🔥','🔦','🔧','🔨','🔩','🔪','🔫','🔬','🔭','🔮','🔯','🔰','🔱','🔲','🔳','🔴','🔵','🔶','🔷','🔸','🔹','🔺','🔻','🔼','🔽','🕋','🕌','🕍','🕎','🕐','🕑','🕒','🕓','🕔','🕕','🕖','🕗','🕘','🕙','🕚','🕛','🕜','🕝','🕞','🕟','🕠','🕡','🕢','🕣','🕤','🕥','🕦','🕧','🖕','🖖','🗻','🗼','🗽','🗾','🗿','😀','😁','😂','😃','😄','😅','😆','😇','😈','😉','😊','😋','😌','😍','😎','😏','😐','😑','😒','😓','😔','😕','😖','😗','😘','😙','😚','😛','😜','😝','😞','😟','😠','😡','😢','😣','😤','😥','😦','😧','😨','😩','😪','😫','😬','😭','😮','😯','😰','😱','😲','😳','😴','😵','😶','😷','😸','😹','😺','😻','😼','😽','😾','😿','🙀','🙁','🙂','🙃','🙄','🙅','🙆','🙇','🙈','🙉','🙊','🙋','🙌','🙍','🙎','🙏','🚀','🚁','🚂','🚃','🚄','🚅','🚆','🚇','🚈','🚉','🚊','🚋','🚌','🚍','🚎','🚏','🚐','🚑','🚒','🚓','🚔','🚕','🚖','🚗','🚘','🚙','🚚','🚛','🚜','🚝','🚞','🚟','🚠','🚡','🚢','🚣','🚤','🚥','🚦','🚧','🚨','🚩','🚪','🚫','🚬','🚭','🚮','🚯','🚰','🚱','🚲','🚳','🚴','🚵','🚶','🚷','🚸','🚹','🚺','🚻','🚼','🚽','🚾','🚿','🛀','🛁','🛂','🛃','🛄','🛅','🛌','🛐','🛫','🛬','🤐','🤑','🤒','🤓','🤔','🤕','🤖','🤗','🤘','🦀','🦁','🦂','🦃','🦄','🧀','🇦🇨','🇦🇩','🇦🇪','🇦🇫','🇦🇬','🇦🇮','🇦🇱','🇦🇲','🇦🇴','🇦🇶','🇦🇷','🇦🇸','🇦🇹','🇦🇺','🇦🇼','🇦🇽','🇦🇿','🇧🇦','🇧🇧','🇧🇩','🇧🇪','🇧🇫','🇧🇬','🇧🇭','🇧🇮','🇧🇯','🇧🇱','🇧🇲','🇧🇳','🇧🇴','🇧🇶','🇧🇷','🇧🇸','🇧🇹','🇧🇻','🇧🇼','🇧🇾','🇧🇿','🇨🇦','🇨🇨','🇨🇩','🇨🇫','🇨🇬','🇨🇭','🇨🇮','🇨🇰','🇨🇱','🇨🇲','🇨🇳','🇨🇴','🇨🇵','🇨🇷','🇨🇺','🇨🇻','🇨🇼','🇨🇽','🇨🇾','🇨🇿','🇩🇪','🇩🇬','🇩🇯','🇩
🇰','🇩🇲','🇩🇴','🇩🇿','🇪🇦','🇪🇨','🇪🇪','🇪🇬','🇪🇭','🇪🇷','🇪🇸','🇪🇹','🇪🇺','🇫🇮','🇫🇯','🇫🇰','🇫🇲','🇫🇴','🇫🇷','🇬🇦','🇬🇧','🇬🇩','🇬🇪','🇬🇫','🇬🇬','🇬🇭','🇬🇮','🇬🇱','🇬🇲','🇬🇳','🇬🇵','🇬🇶','🇬🇷','🇬🇸','🇬🇹','🇬🇺','🇬🇼','🇬🇾','🇭🇰','🇭🇲','🇭🇳','🇭🇷','🇭🇹','🇭🇺','🇮🇨','🇮🇩','🇮🇪','🇮🇱','🇮🇲','🇮🇳','🇮🇴','🇮🇶','🇮🇷','🇮🇸','🇮🇹','🇯🇪','🇯🇲','🇯🇴','🇯🇵','🇰🇪','🇰🇬','🇰🇭','🇰🇮','🇰🇲','🇰🇳','🇰🇵','🇰🇷','🇰🇼','🇰🇾','🇰🇿','🇱🇦','🇱🇧','🇱🇨','🇱🇮','🇱🇰','🇱🇷','🇱🇸','🇱🇹','🇱🇺','🇱🇻','🇱🇾','🇲🇦','🇲🇨','🇲🇩','🇲🇪','🇲🇫','🇲🇬','🇲🇭','🇲🇰','🇲🇱','🇲🇲','🇲🇳','🇲🇴','🇲🇵','🇲🇶','🇲🇷','🇲🇸','🇲🇹','🇲🇺','🇲🇻','🇲🇼','🇲🇽','🇲🇾','🇲🇿','🇳🇦','🇳🇨','🇳🇪','🇳🇫','🇳🇬','🇳🇮','🇳🇱','🇳🇴','🇳🇵','🇳🇷','🇳🇺','🇳🇿','🇴🇲','🇵🇦','🇵🇪','🇵🇫','🇵🇬','🇵🇭','🇵🇰','🇵🇱','🇵🇲','🇵🇳','🇵🇷','🇵🇸','🇵🇹','🇵🇼','🇵🇾','🇶🇦','🇷🇪','🇷🇴','🇷🇸','🇷🇺','🇷🇼','🇸🇦','🇸🇧','🇸🇨','🇸🇩','🇸🇪','🇸🇬','🇸🇭','🇸🇮','🇸🇯','🇸🇰','🇸🇱','🇸🇲','🇸🇳','🇸🇴','🇸🇷','🇸🇸','🇸🇹','🇸🇻','🇸🇽','🇸🇾','🇸🇿','🇹🇦','🇹🇨','🇹🇩','🇹🇫','🇹🇬','🇹🇭','🇹🇯','🇹🇰','🇹🇱','🇹🇲','🇹🇳','🇹🇴','🇹🇷','🇹🇹','🇹🇻','🇹🇼','🇹🇿','🇺🇦','🇺🇬','🇺🇲','🇺🇸','🇺🇾','🇺🇿','🇻🇦','🇻🇨','🇻🇪','🇻🇬','🇻🇮','🇻🇳','🇻🇺','🇼🇫','🇼🇸','🇽🇰','🇾🇪','🇾🇹','🇿🇦','🇿🇲','🇿🇼']; \ No newline at end of file +emoji_lookup = [ + "⌚","⌛","","⏪","⏫","⏬","⏰","⏳","◽","◾","☔","☕","♈","♉","♊","♋","♌","♍","♎","♏","♐","♑","♒","♓","♿","⚓", + "⚡","⚪","⚫","⚽","⚾","⛄","⛅","⛎","⛔","⛪","⛲","⛳","⛵","⛺","⛽","✅","✊","✋","✨","❌","❎","❓","❔","❕","❗","➕", + "➖","➗","➰","➿","⬛","⬜","⭐","⭕","🀄","🃏","🆎","🆑","🆒","🆓","🆔","🆕","🆖","🆗","🆘","🆙","🆚","🈁","🈚","🈯","🈲","🈳", + "🈴","🈵","🈶","🈸","🈹","🈺","🉐","🉑","🌀","🌁","🌂","🌃","🌄","🌅","🌆","🌇","🌈","🌉","🌊","🌋","🌌","🌍","🌎","🌏","🌐","🌑", + "🌒","🌓","🌔","🌕","🌖","🌗","🌘","🌙","🌚","🌛","🌜","🌝","🌞","🌟","🌠","🌭","🌮","🌯","🌰","🌱","🌲","🌳","🌴","🌵","🌷","🌸", + "🌹","🌺","🌻","🌼","🌽","🌾","🌿","🍀","🍁","🍂","🍃","🍄","🍅","🍆","🍇","🍈","🍉","🍊","🍋","🍌","🍍","🍎","🍏","🍐","🍑","🍒", + "🍓","🍔","🍕","🍖","🍗","🍘","🍙","🍚","🍛","🍜","🍝","🍞","🍟","🍠","🍡","🍢","🍣","🍤","🍥","🍦","🍧","🍨","🍩","🍪","🍫","🍬", + "🍭","🍮","🍯","🍰","🍱","🍲","🍳","🍴","🍵","🍶","🍷","🍸","🍹","🍺","🍻","🍼","🍾","🍿","🎀","🎁","🎂","🎃","🎄","🎅","🎆","🎇", + "🎈","🎉","🎊","🎋","🎌","🎍","🎎","🎏","🎐","🎑","🎒","🎓","🎠","🎡","🎢","🎣","🎤","🎥","🎦","🎧","🎨","🎩","🎪","🎫","🎬","🎭", + "🎮","🎯","🎰","🎱","🎲","🎳","🎴","🎵","🎶","🎷","🎸","🎹","🎺","🎻","🎼","🎽","🎾","🎿","🏀","🏁","🏂","🏃","🏄","🏅","🏆","🏇", + "🏈","🏉","🏊","🏏","🏐","🏑","🏒","🏓","🏠","🏡","🏢","🏣","🏤","🏥","🏦","🏧","🏨","🏩","🏪","🏫","🏬","🏭","🏮","🏯","🏰","🏴", + "🏸","🏹","🏺","🏻","🏼","🏽","🏾","🏿","🐀","🐁","🐂","🐃","🐄","🐅","🐆","🐇","🐈","🐉","🐊","🐋","🐌","🐍","🐎","🐏","🐐","🐑", + "🐒","🐓","🐔","🐕","🐖","🐗","🐘","🐙","🐚","🐛","🐜","🐝","🐞","🐟","🐠","🐡","🐢","🐣","🐤","🐥","🐦","🐧","🐨","🐩","🐪","🐫", + "🐬","🐭","🐮","🐯","🐰","🐱","🐲","🐳","🐴","🐵","🐶","🐷","🐸","🐹","🐺","🐻","🐼","🐽","🐾","👀","👂","👃","👄","👅","👆","👇", + "👈","👉","👊","👋","👌","👍","👎","👏","👐","👑","👒","👓","👔","👕","👖","👗","👘","👙","👚","👛","👜","👝","👞","👟","👠","👡", + "👢","👣","👤","👥","👦","👧","👨","👩","👪","👫","👬","👭","👮","👯","👰","👱","👲","👳","👴","👵","👶","👷","👸","👹","👺","👻", + "👼","👽","👾","👿","💀","💁","💂","💃","💄","💅","💆","💇","💈","💉","💊","💋","💌","💍","💎","💏","💐","💑","💒","💓","💔","💕", + "💖","💗","💘","💙","💚","💛","💜","💝","💞","💟","💠","💡","💢","💣","💤","💥","💦","💧","💨","💩","💪","💫","💬","💭","💮","💯", + "💰","💱","💲","💳","💴","💵","💶","💷","💸","💹","💺","💻","💼","💽","💾","💿","📀","📁","📂","📃","📄","📅","📆","📇","📈","📉", + "📊","📋","📌","📍","📎","📏","📐","📑","📒","📓","📔","📕","📖","📗","📘","📙","📚","📛","📜","📝","📞","📟","📠","📡","📢","📣", + "📤","📥","📦","📧","📨","📩","📪","📫","📬","📭","📮","📯","📰","📱","📲","📳","📴","📵","📶","📷","📸","📹","📺","📻","📼","📿", + "🔀","🔁","🔂","🔃","🔄","🔅","🔆","🔇","🔈","🔉","🔊","🔋","🔌","🔍","🔎","🔏","🔐","🔑","🔒","🔓","🔔","🔕","🔖","🔗","🔘","🔙", + "🔚","🔛","🔜","🔝","🔞","🔟","🔠","🔡","🔢","🔣","🔤","🔥","🔦","🔧","🔨","🔩","🔪","🔫","🔬","🔭","🔮","🔯","🔰","🔱","🔲","🔳", + 
"🔴","🔵","🔶","🔷","🔸","🔹","🔺","🔻","🔼","🔽","🕋","🕌","🕍","🕎","🕐","🕑","🕒","🕓","🕔","🕕","🕖","🕗","🕘","🕙","🕚","🕛", + "🕜","🕝","🕞","🕟","🕠","🕡","🕢","🕣","🕤","🕥","🕦","🕧","🖕","🖖","🗻","🗼","🗽","🗾","🗿","😀","😁","😂","😃","😄","😅","😆", + "😇","😈","😉","😊","😋","😌","😍","😎","😏","😐","😑","😒","😓","😔","😕","😖","😗","😘","😙","😚","😛","😜","😝","😞","😟","😠", + "😡","😢","😣","😤","😥","😦","😧","😨","😩","😪","😫","😬","😭","😮","😯","😰","😱","😲","😳","😴","😵","😶","😷","😸","😹","😺", + "😻","😼","😽","😾","😿","🙀","🙁","🙂","🙃","🙄","🙅","🙆","🙇","🙈","🙉","🙊","🙋","🙌","🙍","🙎","🙏","🚀","🚁","🚂","🚃","🚄", + "🚅","🚆","🚇","🚈","🚉","🚊","🚋","🚌","🚍","🚎","🚏","🚐","🚑","🚒","🚓","🚔","🚕","🚖","🚗","🚘","🚙","🚚","🚛","🚜","🚝","🚞", + "🚟","🚠","🚡","🚢","🚣","🚤","🚥","🚦","🚧","🚨","🚩","🚪","🚫","🚬","🚭","🚮","🚯","🚰","🚱","🚲","🚳","🚴","🚵","🚶","🚷","🚸", + "🚹","🚺","🚻","🚼","🚽","🚾","🚿","🛀","🛁","🛂","🛃","🛄","🛅","🛌","🛐","🛫","🛬","🤐","🤑","🤒","🤓","🤔","🤕","🤖","🤗","🤘", + "🦀","🦁","🦂","🦃","🦄","🧀","🇦🇨","🇦🇩","🇦🇪","🇦🇫","🇦🇬","🇦🇮","🇦🇱","🇦🇲","🇦🇴","🇦🇶","🇦🇷","🇦🇸","🇦🇹","🇦🇺","🇦🇼","🇦🇽","🇦🇿","🇧🇦","🇧🇧","🇧🇩", + "🇧🇪","🇧🇫","🇧🇬","🇧🇭","🇧🇮","🇧🇯","🇧🇱","🇧🇲","🇧🇳","🇧🇴","🇧🇶","🇧🇷","🇧🇸","🇧🇹","🇧🇻","🇧🇼","🇧🇾","🇧🇿","🇨🇦","🇨🇨","🇨🇩","🇨🇫","🇨🇬","🇨🇭","🇨🇮","🇨🇰", + "🇨🇱","🇨🇲","🇨🇳","🇨🇴","🇨🇵","🇨🇷","🇨🇺","🇨🇻","🇨🇼","🇨🇽","🇨🇾","🇨🇿","🇩🇪","🇩🇬","🇩🇯","🇩🇰","🇩🇲","🇩🇴","🇩🇿","🇪🇦","🇪🇨","🇪🇪","🇪🇬","🇪🇭","🇪🇷","🇪🇸", + "🇪🇹","🇪🇺","🇫🇮","🇫🇯","🇫🇰","🇫🇲","🇫🇴","🇫🇷","🇬🇦","🇬🇧","🇬🇩","🇬🇪","🇬🇫","🇬🇬","🇬🇭","🇬🇮","🇬🇱","🇬🇲","🇬🇳","🇬🇵","🇬🇶","🇬🇷","🇬🇸","🇬🇹","🇬🇺","🇬🇼", + "🇬🇾","🇭🇰","🇭🇲","🇭🇳","🇭🇷","🇭🇹","🇭🇺","🇮🇨","🇮🇩","🇮🇪","🇮🇱","🇮🇲","🇮🇳","🇮🇴","🇮🇶","🇮🇷","🇮🇸","🇮🇹","🇯🇪","🇯🇲","🇯🇴","🇯🇵","🇰🇪","🇰🇬","🇰🇭","🇰🇮", + "🇰🇲","🇰🇳","🇰🇵","🇰🇷","🇰🇼","🇰🇾","🇰🇿","🇱🇦","🇱🇧","🇱🇨","🇱🇮","🇱🇰","🇱🇷","🇱🇸","🇱🇹","🇱🇺","🇱🇻","🇱🇾","🇲🇦","🇲🇨","🇲🇩","🇲🇪","🇲🇫","🇲🇬","🇲🇭","🇲🇰", + "🇲🇱","🇲🇲","🇲🇳","🇲🇴","🇲🇵","🇲🇶","🇲🇷","🇲🇸","🇲🇹","🇲🇺","🇲🇻","🇲🇼","🇲🇽","🇲🇾","🇲🇿","🇳🇦","🇳🇨","🇳🇪","🇳🇫","🇳🇬","🇳🇮","🇳🇱","🇳🇴","🇳🇵","🇳🇷","🇳🇺", + "🇳🇿","🇴🇲","🇵🇦","🇵🇪","🇵🇫","🇵🇬","🇵🇭","🇵🇰","🇵🇱","🇵🇲","🇵🇳","🇵🇷","🇵🇸","🇵🇹","🇵🇼","🇵🇾","🇶🇦","🇷🇪","🇷🇴","🇷🇸","🇷🇺","🇷🇼","🇸🇦","🇸🇧","🇸🇨","🇸🇩", + "🇸🇪","🇸🇬","🇸🇭","🇸🇮","🇸🇯","🇸🇰","🇸🇱","🇸🇲","🇸🇳","🇸🇴","🇸🇷","🇸🇸","🇸🇹","🇸🇻","🇸🇽","🇸🇾","🇸🇿","🇹🇦","🇹🇨","🇹🇩","🇹🇫","🇹🇬","🇹🇭","🇹🇯","🇹🇰","🇹🇱", + "🇹🇲","🇹🇳","🇹🇴","🇹🇷","🇹🇹","🇹🇻","🇹🇼","🇹🇿","🇺🇦","🇺🇬","🇺🇲","🇺🇸","🇺🇾","🇺🇿","🇻🇦","🇻🇨","🇻🇪","🇻🇬","🇻🇮","🇻🇳","🇻🇺","🇼🇫","🇼🇸","🇽🇰","🇾🇪","🇾🇹", + "🇿🇦","🇿🇲","🇿🇼","🟢"]; + +emoji_extra_1 = [ + "©","®","‼","⁉","™","ℹ","↔","↕","↖","↗","↘","↙","↩","↪","⌚","⌛","⌨","⏏","⏩","⏪","⏫","⏬","⏭","⏮","⏯", + "⏰","⏱","⏲","⏳","⏸","⏹","⏺","Ⓜ","▪","▫","▶","◀","◻","◼","◽","◾","☀","☁","☂","☃","☄","☎","☑","☔", + "☕","☘","☝","☠","☢","☣","☦","☪","☮","☯","☸","☹","☺","♀","♂","♈","♉","♊","♋","♌","♍","♎","♏","♐", + "♑","♒","♓","♟","♠","♣","♥","♦","♨","♻","♾","♿","⚒","⚓","⚔","⚕","⚖","⚗","⚙","⚛","⚜","⚠","⚡","⚧","⚪", + "⚫","⚰","⚱","⚽","⚾","⛄","⛅","⛈","⛎","⛏","⛑","⛓","⛔","⛩","⛪","⛰","⛱","⛲","⛳","⛴","⛵","⛷","⛸", + "⛹","⬆","⬇","⬛","⬜","⭐","⭕","〰","⛺","⛽","✂","✅","✈","✉","✊","✋","✌","✍","✏","✒","✔","✖","✝","✡", + "✨","✳","✴","❄","❇","❌","❎","❓","❔","❕","❗","❣","❤","➕","➖","➗","➡","➰","➿","⤴","⤵","⬅","〽","㊗", + "㊙","🀄","🃏","🅰","🅱","🅾","🅿","🆎","🆑","🆒","🆓","🆔","🆕","🆖","🆗","🆘","🆙","🆚","🈁","🈂","🈚","🈯","🈲", + "🈳","🈴","🈵","🈶","🈷","🈸","🈹","🈺","🉐","🉑","🌀","🌁","🌂","🌃","🌄","🌅","🌆","🌇","🌈","🌉","🌊","🌋", + "🌌","🌍","🌎","🌏","🌐","🌑","🌒","🌓","🌔","🌕","🌖","🌗","🌘","🌙","🌚","🌛","🌜","🌝","🌞","🌟","🌠","🌡", + "🌤","🌥","🌦","🌧","🌨","🌩","🌪","🌫","🌬","🌭","🌮","🌯","🌰","🌱","🌲","🌳","🌴","🌵","🌶","🌷","🌸","🌹", + "🌺","🌻","🌼","🌽","🌾","🌿","🍀","🍁","🍂","🍃","🍄","🍅","🍆","🍇","🍈","🍉","🍊","🍋","🍌","🍍","🍎","🍏", + "🍐","🍑","🍒","🍓","🍔","🍕","🍖","🍗","🍘","🍙","🍚","🍛","🍜","🍝","🍞","🍟","🍠","🍡","🍢","🍣","🍤","🍥", + 
"🍦","🍧","🍨","🍩","🍪","🍫","🍬","🍭","🍮","🍯","🍰","🍱","🍲","🍳","🍴","🍵","🍶","🍷","🍸","🍹","🍺","🍻", + "🍼","🍽","🍾","🍿","🎀","🎁","🎂","🎃","🎄","🎅","🎆","🎇","🎈","🎉","🎊","🎋","🎌","🎍","🎎","🎏","🎐","🎑", + "🎒","🎓","🎖","🎗","🎙","🎚","🎛","🎞","🎟","🎠","🎡","🎢","🎣","🎤","🎥","🎦","🎧","🎨","🎩","🎪","🎫","🎬","🎭", + "🎮","🎯","🎰","🎱","🎲","🎳","🎴","🎵","🎶","🎷","🎸","🎹","🎺","🎻","🎼","🎽","🎾","🎿","🏀","🏁","🏂","🏃", + "🏄","🏅","🏆","🏇","🏈","🏉","🏊","🏋","🏌","🏍","🏎","🏏","🏐","🏑","🏒","🏓","🏔","🏕","🏖","🏗","🏘","🏙", + "🏚","🏛","🏜","🏝","🏞","🏟","🏠","🏡","🏢","🏣","🏤","🏥","🏦","🏧","🏨","🏩","🏪","🏫","🏬","🏭","🏮","🏯", + "🏰","🏳","🏴","🏵","🏷","🏸","🏹","🏺","🏻","🏼","🏽","🏾","🏿","🐀","🐁","🐂","🐃","🐄","🐅","🐆","🐇","🐈", + "🐉","🐊","🐋","🐌","🐍","🐎","🐏","🐐","🐑","🐒","🐓","🐔","🐕","🐖","🐗","🐘","🐙","🐚","🐛","🐜","🐝","🐞", + "🐟","🐠","🐡","🐢","🐣","🐤","🐥","🐦","🐧","🐨","🐩","🐪","🐫","🐬","🐭","🐮","🐯","🐰","🐱","🐲","🐳","🐴", + "🐵","🐶","🐷","🐸","🐹","🐺","🐻","🐼","🐽","🐾","🐿","👀","👁","👂","👃","👄","👅","👆","👇","👈","👉","👊", + "👋","👌","👍","👎","👏","👐","👑","👒","👓","👔","👕","👖","👗","👘","👙","👚","👛","👜","👝","👞","👟","👠", + "👡","👢","👣","👤","👥","👦","👧","👨","👩","👪","👫","👬","👭","👮","👯","👰","👱","👲","👳","👴","👵","👶", + "👷","👸","👹","👺","👻","👼","👽","👾","👿","💀","💁","💂","💃","💄","💅","💆","💇","💈","💉","💊","💋","💌", + "💍","💎","💏","💐","💑","💒","💓","💔","💕","💖","💗","💘","💙","💚","💛","💜","💝","💞","💟","💠","💡","💢", + "💣","💤","💥","💦","💧","💨","💩","💪","💫","💬","💭","💮","💯","💰","💱","💲","💳","💴","💵","💶","💷","💸", + "💹","💺","💻","💼","💽","💾","💿","📀","📁","📂","📃","📄","📅","📆","📇","📈","📉","📊","📋","📌","📍","📎", + "📏","📐","📑","📒","📓","📔","📕","📖","📗","📘","📙","📚","📛","📜","📝","📞","📟","📠","📡","📢","📣","📤", + "📥","📦","📧","📨","📩","📪","📫","📬","📭","📮","📯","📰","📱","📲","📳","📴","📵","📶","📷","📸","📹","📺", + "📻","📼","📽","📿","🔀","🔁","🔂","🔃","🔄","🔅","🔆","🔇","🔈","🔉","🔊","🔋","🔌","🔍","🔎","🔏","🔐","🔑", + "🔒","🔓","🔔","🔕","🔖","🔗","🔘","🔙","🔚","🔛","🔜","🔝","🔞","🔟","🔠","🔡","🔢","🔣","🔤","🔥","🔦","🔧", + "🔨","🔩","🔪","🔫","🔬","🔭","🔮","🔯","🔰","🔱","🔲","🔳","🔴","🔵","🔶","🔷","🔸","🔹","🔺","🔻","🔼","🔽", + "🕉","🕊","🕋","🕌","🕍","🕎","🕐","🕑","🕒","🕓","🕔","🕕","🕖","🕗","🕘","🕙","🕚","🕛","🕜","🕝","🕞","🕟", + "🕠","🕡","🕢","🕣","🕤","🕥","🕦","🕧","🕯","🕰","🕳","🕴","🕵","🕶","🕷","🕸","🕹","🕺","🖇","🖊","🖋","🖌","🖍", + "🖐","🖕","🖖","🖤","🖥","🖨","🖱","🖲","🖼","🗂","🗃","🗄","🗑","🗒","🗓","🗜","🗝","🗞","🗡","🗣","🗨","🗯","🗳","🗺", + "🗻","🗼","🗽","🗾","🗿","😀","😁","😂","😃","😄","😅","😆","😇","😈","😉","😊","😋","😌","😍","😎","😏","😐", + "😑","😒","😓","😔","😕","😖","😗","😘","😙","😚","😛","😜","😝","😞","😟","😠","😡","😢","😣","😤","😥","😦", + "😧","😨","😩","😪","😫","😬","😭","😮","😯","😰","😱","😲","😳","😴","😵","😶","😷","😸","😹","😺","😻","😼", + "😽","😾","😿","🙀","🙁","🙂","🙃","🙄","🙅","🙆","🙇","🙈","🙉","🙊","🙋","🙌","🙍","🙎","🙏","🚀","🚁","🚂", + "🚃","🚄","🚅","🚆","🚇","🚈","🚉","🚊","🚋","🚌","🚍","🚎","🚏","🚐","🚑","🚒","🚓","🚔","🚕","🚖","🚗","🚘", + "🚙","🚚","🚛","🚜","🚝","🚞","🚟","🚠","🚡","🚢","🚣","🚤","🚥","🚦","🚧","🚨","🚩","🚪","🚫","🚬","🚭","🚮", + "🚯","🚰","🚱","🚲","🚳","🚴","🚵","🚶","🚷","🚸","🚹","🚺","🚻","🚼","🚽","🚾","🚿","🛀","🛁","🛂","🛃","🛄", + "🛅","🛋","🛌","🛍","🛎","🛏","🛐","🛑","🛒","🛕","🛖","🛗","🛠","🛡","🛢","🛣","🛤","🛥","🛩","🛫","🛬","🛰","🛳", + "🛴","🛵","🛶","🛷","🛸","🛹","🛺","🛻","🛼","🟠","🟡","🟢","🟣","🟤","🟥","🟦","🟧","🟨","🟩","🟪","🟫","🤌", + "🤍","🤎","🤏","🤐","🤑","🤒","🤓","🤔","🤕","🤖","🤗","🤘","🤙","🤚","🤛","🤜","🤝","🤞","🤟","🤠","🤡","🤢", + "🤣","🤤","🤥","🤦","🤧","🤨","🤩","🤪","🤫","🤬","🤭","🤮","🤯","🤰","🤱","🤲","🤳","🤴","🤵","🤶","🤷","🤸", + "🤹","🤺","🤼","🤽","🤾","🤿","🥀","🥁","🥂","🥃","🥄","🥅","🥇","🥈","🥉","🥊","🥋","🥌","🥍","🥎","🥏","🥐", + "🥑","🥒","🥓","🥔","🥕","🥖","🥗","🥘","🥙","🥚","🥛","🥜","🥝","🥞","🥟","🥠","🥡","🥢","🥣","🥤","🥥","🥦", + "🥧","🥨","🥩","🥪","🥫","🥬","🥭","🥮","🥯","🥰","🥱","🥲","🥳","🥴","🥵","🥶","🥷","🥸","🥺","🥻","🥼","🥽", + 
"🥾","🥿","🦀","🦁","🦂","🦃","🦄","🦅","🦆","🦇","🦈","🦉","🦊","🦋","🦌","🦍","🦎","🦏","🦐","🦑","🦒","🦓", + "🦔","🦕","🦖","🦗","🦘","🦙","🦚","🦛","🦜","🦝","🦞","🦟","🦠","🦡","🦢","🦣","🦤","🦥","🦦","🦧","🦨","🦩", + "🦪","🦫","🦬","🦭","🦮","🦯","🦰","🦱","🦲","🦳","🦴","🦵","🦶","🦷","🦸","🦹","🦺","🦻","🦼","🦽","🦾","🦿", + "🧀","🧁","🧂","🧃","🧄","🧅","🧆","🧇","🧈","🧉","🧊","🧋","🧍","🧎","🧏","🧐","🧑","🧒","🧓","🧔","🧕","🧖", + "🧗","🧘","🧙","🧚","🧛","🧜","🧝","🧞","🧟","🧠","🧡","🧢","🧣","🧤","🧥","🧦","🧧","🧨","🧩","🧪","🧫","🧬", + "🧭","🧮","🧯","🧰","🧱","🧲","🧳","🧴","🧵","🧶","🧷","🧸","🧹","🧺","🧻","🧼","🧽","🧾","🧿","🩰","🩱","🩲", + "🩳","🩴","🩸","🩹","🩺","🪀","🪁","🪂","🪃","🪄","🪅","🪆","🪐","🪑","🪒","🪓","🪔","🪕","🪖","🪗","🪘","🪙", + "🪚","🪛","🪜","🪝","🪞","🪟","🪠","🪡","🪢","🪣","🪤","🪥","🪦","🪧","🪨","🪰","🪱","🪲","🪳","🪴","🪵","🪶", + "🫀","🫁","🫂","🫐","🫑","🫒","🫓","🫔","🫕","🫖"] + +for e in emoji_extra_1: + if not e in emoji_lookup: + emoji_lookup.append(e) diff --git a/sbapp/ui/layouts.py b/sbapp/ui/layouts.py index 3a6d6af..f687676 100644 --- a/sbapp/ui/layouts.py +++ b/sbapp/ui/layouts.py @@ -61,6 +61,16 @@ MDNavigationLayout: on_release: root.ids.screen_manager.app.conversations_action(self) + OneLineIconListItem: + text: "Objects & Devices" + on_release: root.ids.screen_manager.app.objects_action(self) + # _no_ripple_effect: True + + IconLeftWidget: + icon: "devices" + on_release: root.ids.screen_manager.app.objects_action(self) + + OneLineIconListItem: text: "Situation Map" on_release: root.ids.screen_manager.app.map_action(self) @@ -1350,6 +1360,19 @@ MDScreen: font_size: dp(24) height: dp(64) + MDLabel: + text: "Appearance" + font_style: "H6" + size_hint_y: None + height: self.texture_size[1] + + MDLabel: + id: settings_info3 + markup: True + text: "\\nThis section lets you configure the appearance of the application to suit your preferences, such as themes and what levels of information to display. When user icons are enabled, the contact list will display icons other users have configured in their [b]Telemetry[/b] settings.\\n" + size_hint_y: None + height: self.texture_size[1] + MDBoxLayout: orientation: "horizontal" size_hint_y: None @@ -1402,7 +1425,7 @@ MDScreen: height: dp(48) MDLabel: - text: "Display styles in conversation list" + text: "Show user icons in conversation list" font_style: "H6" MDSwitch: @@ -1410,6 +1433,21 @@ MDScreen: pos_hint: {"center_y": 0.3} active: False + MDBoxLayout: + orientation: "horizontal" + size_hint_y: None + padding: [0,0,dp(24),dp(0)] + height: dp(48) + + MDLabel: + text: "Only show user icons from trusted" + font_style: "H6" + + MDSwitch: + id: display_style_from_trusted_only + pos_hint: {"center_y": 0.3} + active: False + MDBoxLayout: orientation: "horizontal" size_hint_y: None @@ -1425,6 +1463,19 @@ MDScreen: pos_hint: {"center_y": 0.3} active: False + MDLabel: + text: "\\nBehaviour" + font_style: "H6" + size_hint_y: None + height: self.texture_size[1] + + MDLabel: + id: settings_info3 + markup: True + text: "\\nThis section configures various automated actions and default behaviours. 
Sync intervals can be configured, and you can control what kind of peers can send you messages.\\n" + size_hint_y: None + height: self.texture_size[1] + MDBoxLayout: orientation: "horizontal" size_hint_y: None @@ -1552,6 +1603,54 @@ MDScreen: sensitivity: "all" hint: False + MDBoxLayout: + orientation: "horizontal" + size_hint_y: None + padding: [0,0,dp(24),dp(0)] + height: dp(48) + + MDLabel: + id: settings_lxmf_require_stamps_label + text: "Require stamps for incoming" + font_style: "H6" + + MDSwitch: + id: settings_lxmf_require_stamps + pos_hint: {"center_y": 0.3} + disabled: False + active: False + + MDBoxLayout: + id: lxmf_costslider_container + orientation: "vertical" + size_hint_y: None + padding: [0,0,dp(0),0] + height: dp(68) + + MDSlider + min: 1 + max: 32 + value: 8 + id: settings_lxmf_require_stamps_cost + sensitivity: "all" + hint: False + + MDBoxLayout: + orientation: "horizontal" + size_hint_y: None + padding: [0,0,dp(24),dp(0)] + height: dp(48) + + MDLabel: + text: "Ignore messages with invalid stamps" + font_style: "H6" + + MDSwitch: + id: settings_ignore_invalid_stamps + pos_hint: {"center_y": 0.3} + disabled: False + active: False + MDBoxLayout: orientation: "horizontal" size_hint_y: None diff --git a/sbapp/ui/messages.py b/sbapp/ui/messages.py index e802366..7c370cb 100644 --- a/sbapp/ui/messages.py +++ b/sbapp/ui/messages.py @@ -25,25 +25,31 @@ else: import io import os -import plyer import subprocess import shlex from kivy.graphics.opengl import glGetIntegerv, GL_MAX_TEXTURE_SIZE if RNS.vendor.platformutils.get_platform() == "android": + import plyer from sideband.sense import Telemeter, Commands from ui.helpers import ts_format, file_ts_format, mdc - from ui.helpers import color_received, color_delivered, color_propagated, color_paper, color_failed, color_unknown, intensity_msgs_dark, intensity_msgs_light + from ui.helpers import color_playing, color_received, color_delivered, color_propagated, color_paper, color_failed, color_unknown, intensity_msgs_dark, intensity_msgs_light, intensity_play_dark, intensity_play_light else: + import sbapp.plyer as plyer from sbapp.sideband.sense import Telemeter, Commands from .helpers import ts_format, file_ts_format, mdc - from .helpers import color_received, color_delivered, color_propagated, color_paper, color_failed, color_unknown, intensity_msgs_dark, intensity_msgs_light + from .helpers import color_playing, color_received, color_delivered, color_propagated, color_paper, color_failed, color_unknown, intensity_msgs_dark, intensity_msgs_light, intensity_play_dark, intensity_play_light if RNS.vendor.platformutils.is_darwin(): from PIL import Image as PilImage from kivy.lang.builder import Builder +from kivymd.uix.list import OneLineIconListItem, IconLeftWidget + +class DialogItem(OneLineIconListItem): + divider = None + icon = StringProperty() class ListLXMessageCard(MDCard): # class ListLXMessageCard(MDCard, FakeRectangularElevationBehavior): @@ -56,6 +62,7 @@ class Messages(): self.context_dest = context_dest self.source_dest = context_dest self.is_trusted = self.app.sideband.is_trusted(self.context_dest) + self.ptt_enabled = self.app.sideband.ptt_enabled(self.context_dest) self.screen = self.app.root.ids.screen_manager.get_screen("messages_screen") self.ids = self.screen.ids @@ -71,6 +78,7 @@ class Messages(): self.widgets = [] self.send_error_dialog = None self.load_more_button = None + self.details_dialog = None self.update() def reload(self): @@ -93,6 +101,99 @@ class Messages(): self.loading_earlier_messages = True 
self.list.remove_widget(self.load_more_button) + def message_details_dialog(self, lxm_hash): + RNS.log(f"Opening dialog for {RNS.prettyhexrep(lxm_hash)}", RNS.LOG_DEBUG) + ss = int(dp(16)) + ms = int(dp(14)) + + msg = self.app.sideband.message(lxm_hash) + if msg: + close_button = MDRectangleFlatButton(text="Close", font_size=dp(18)) + # d_items = [ ] + # d_items.append(DialogItem(IconLeftWidget(icon="postage-stamp"), text="[size="+str(ss)+"]Stamp[/size]")) + + d_text = "" + + if "lxm" in msg and msg["lxm"] != None: + size_str = RNS.prettysize(msg["lxm"].packed_size) + d_text += f"[size={ss}][b]Message size[/b] {size_str}[/size]\n" + + if msg["lxm"].signature_validated: + d_text += f"[size={ss}][b]Signature[/b] validated successfully[/size]\n" + else: + d_text += f"[size={ss}][b]Signature[/b] is invalid[/size]\n" + + ratchet_method = "" + if "method" in msg: + if msg["method"] == LXMF.LXMessage.UNKNOWN: + d_text += f"[size={ss}][b]Delivered[/b] via unknown method[/size]\n" + if msg["method"] == LXMF.LXMessage.OPPORTUNISTIC: + ratchet_method = "with ratchet" + d_text += f"[size={ss}][b]Delivered[/b] opportunistically[/size]\n" + if msg["method"] == LXMF.LXMessage.DIRECT: + ratchet_method = "by link" + d_text += f"[size={ss}][b]Delivered[/b] over direct link[/size]\n" + if msg["method"] == LXMF.LXMessage.PROPAGATED: + ratchet_method = "with ratchet" + d_text += f"[size={ss}][b]Delivered[/b] to propagation network[/size]\n" + + if msg["extras"] != None and "ratchet_id" in msg["extras"]: + r_str = RNS.prettyhexrep(msg["extras"]["ratchet_id"]) + d_text += f"[size={ss}][b]Encrypted[/b] {ratchet_method} {r_str}[/size]\n" + else: + if msg["method"] == LXMF.LXMessage.OPPORTUNISTIC or msg["method"] == LXMF.LXMessage.PROPAGATED: + d_text += f"[size={ss}][b]Encrypted[/b] with destination identity key[/size]\n" + else: + d_text += f"[size={ss}][b]Encryption[/b] status unknown[/size]\n" + + if msg["extras"] != None and "stamp_checked" in msg["extras"]: + valid_str = " is not valid" + if msg["extras"]["stamp_valid"] == True: + valid_str = " is valid" + sv = msg["extras"]["stamp_value"] + if sv == None: + if "stamp_raw" in msg["extras"]: + sv_str = "" + valid_str = "is not valid" + else: + sv_str = "" + valid_str = "was not included in the message" + elif sv > 255: + sv_str = "generated from ticket" + else: + sv_str = f"with value {sv}" + + if msg["extras"]["stamp_checked"] == True: + d_text += f"[size={ss}][b]Stamp[/b] {sv_str}{valid_str}[/size]\n" + + else: + sv = msg["extras"]["stamp_value"] + if sv == None: + pass + elif sv > 255: + d_text += f"[size={ss}][b]Stamp[/b] generated from ticket[/size]\n" + else: + d_text += f"[size={ss}][b]Value[/b] of stamp is {sv}[/size]\n" + + # Stamp details + if "stamp_raw" in msg["extras"] and type(msg["extras"]["stamp_raw"]) == bytes: + sstr = RNS.hexrep(msg["extras"]["stamp_raw"]) + sstr1 = RNS.hexrep(msg["extras"]["stamp_raw"][:16]) + sstr2 = RNS.hexrep(msg["extras"]["stamp_raw"][16:]) + d_text += f"[size={ss}]\n[b]Raw stamp[/b]\n[/size][size={ms}][font=RobotoMono-Regular]{sstr1}\n{sstr2}[/font][/size]\n" + + self.details_dialog = MDDialog( + title="Message Details", + type="simple", + text=d_text, + # items=d_items, + buttons=[ close_button ], + width_offset=dp(32), + ) + + close_button.bind(on_release=self.details_dialog.dismiss) + self.details_dialog.open() + def update(self, limit=8): for new_message in self.app.sideband.list_messages(self.context_dest, after=self.latest_message_timestamp,limit=limit): self.new_messages.append(new_message) @@ -117,6 +218,14 @@ 
class Messages(): layout.bind(minimum_height=layout.setter('height')) self.list = layout + if RNS.vendor.platformutils.is_darwin() or RNS.vendor.platformutils.is_windows(): + self.hide_widget(self.ids.message_ptt, True) + else: + if self.ptt_enabled: + self.hide_widget(self.ids.message_ptt, False) + else: + self.hide_widget(self.ids.message_ptt, True) + c_ts = time.time() if len(self.new_messages) > 0: self.update_widget() @@ -126,8 +235,10 @@ class Messages(): if self.app.sideband.config["dark_ui"]: intensity_msgs = intensity_msgs_dark + intensity_play = intensity_play_dark else: intensity_msgs = intensity_msgs_light + intensity_play = intensity_play_light for w in self.widgets: m = w.m @@ -149,6 +260,13 @@ class Messages(): if prg != None: prgstr = ", "+str(round(prg*100, 1))+"% done" if prg <= 0.00: + stamp_cost = self.app.sideband.get_lxm_stamp_cost(msg["hash"]) + if stamp_cost: + sphrase = f"Generating stamp with cost {stamp_cost}" + prgstr = "" + else: + sphrase = "Waiting for path" + elif prg <= 0.01: sphrase = "Waiting for path" elif prg <= 0.03: sphrase = "Establishing link" @@ -160,8 +278,12 @@ class Messages(): if msg["title"]: titlestr = "[b]Title[/b] "+msg["title"].decode("utf-8")+"\n" w.heading = titlestr+"[b]Sent[/b] "+txstr+"\n[b]State[/b] "+sphrase+prgstr+" " + if w.has_audio: + alstr = RNS.prettysize(w.audio_size) + w.heading += f"\n[b]Audio Message[/b] ({alstr})" m["state"] = msg["state"] + if msg["state"] == LXMF.LXMessage.DELIVERED: w.md_bg_color = msg_color = mdc(color_delivered, intensity_msgs) txstr = time.strftime(ts_format, time.localtime(msg["sent"])) @@ -169,6 +291,9 @@ class Messages(): if msg["title"]: titlestr = "[b]Title[/b] "+msg["title"].decode("utf-8")+"\n" w.heading = titlestr+"[b]Sent[/b] "+txstr+"\n[b]State[/b] Delivered" + if w.has_audio: + alstr = RNS.prettysize(w.audio_size) + w.heading += f"\n[b]Audio Message[/b] ({alstr})" m["state"] = msg["state"] if msg["method"] == LXMF.LXMessage.PAPER: @@ -187,6 +312,9 @@ class Messages(): if msg["title"]: titlestr = "[b]Title[/b] "+msg["title"].decode("utf-8")+"\n" w.heading = titlestr+"[b]Sent[/b] "+txstr+"\n[b]State[/b] On Propagation Net" + if w.has_audio: + alstr = RNS.prettysize(w.audio_size) + w.heading += f"\n[b]Audio Message[/b] ({alstr})" m["state"] = msg["state"] if msg["state"] == LXMF.LXMessage.FAILED: @@ -197,15 +325,29 @@ class Messages(): titlestr = "[b]Title[/b] "+msg["title"].decode("utf-8")+"\n" w.heading = titlestr+"[b]Sent[/b] "+txstr+"\n[b]State[/b] Failed" m["state"] = msg["state"] + if w.has_audio: + alstr = RNS.prettysize(w.audio_size) + w.heading += f"\n[b]Audio Message[/b] ({alstr})" w.dmenu.items.append(w.dmenu.retry_item) + def hide_widget(self, wid, dohide=True): + if hasattr(wid, 'saved_attrs'): + if not dohide: + wid.height, wid.size_hint_y, wid.opacity, wid.disabled = wid.saved_attrs + del wid.saved_attrs + elif dohide: + wid.saved_attrs = wid.height, wid.size_hint_y, wid.opacity, wid.disabled + wid.height, wid.size_hint_y, wid.opacity, wid.disabled = 0, None, 0, True + def update_widget(self): if self.app.sideband.config["dark_ui"]: intensity_msgs = intensity_msgs_dark + intensity_play = intensity_play_dark mt_color = [1.0, 1.0, 1.0, 0.8] else: intensity_msgs = intensity_msgs_light + intensity_play = intensity_play_light mt_color = [1.0, 1.0, 1.0, 0.95] self.ids.message_text.font_name = self.app.input_font @@ -220,6 +362,10 @@ class Messages(): else: message_input = m["content"] + if message_input.strip() == b"": + if not ("lxm" in m and m["lxm"] != None and m["lxm"].fields != 
None and LXMF.FIELD_COMMANDS in m["lxm"].fields): + message_input = "[i]This message contains no text content[/i]".encode("utf-8") + message_markup = multilingual_markup(message_input) txstr = time.strftime(ts_format, time.localtime(m["sent"])) @@ -229,11 +375,15 @@ class Messages(): extra_telemetry = {} telemeter = None image_field = None + audio_field = None has_image = False + has_audio = False attachments_field = None has_attachment = False force_markup = False signature_valid = False + stamp_valid = False + stamp_value = None if "lxm" in m and m["lxm"] != None and m["lxm"].signature_validated: signature_valid = True @@ -244,6 +394,10 @@ class Messages(): except Exception as e: pass + if "extras" in m and m["extras"] != None and "stamp_checked" in m["extras"] and m["extras"]["stamp_checked"] == True: + stamp_valid = m["extras"]["stamp_valid"] + stamp_value = m["extras"]["stamp_value"] + if "lxm" in m and m["lxm"] != None and m["lxm"].fields != None and LXMF.FIELD_COMMANDS in m["lxm"].fields: try: commands = m["lxm"].fields[LXMF.FIELD_COMMANDS] @@ -276,6 +430,13 @@ class Messages(): except Exception as e: pass + if "lxm" in m and m["lxm"] and m["lxm"].fields != None and LXMF.FIELD_AUDIO in m["lxm"].fields: + try: + audio_field = m["lxm"].fields[LXMF.FIELD_AUDIO] + has_audio = True + except Exception as e: + pass + if "lxm" in m and m["lxm"] and m["lxm"].fields != None and LXMF.FIELD_FILE_ATTACHMENTS in m["lxm"].fields: if len(m["lxm"].fields[LXMF.FIELD_FILE_ATTACHMENTS]) > 0: try: @@ -356,6 +517,9 @@ class Messages(): heading_str = titlestr if phy_stats_str != "" and self.app.sideband.config["advanced_stats"]: heading_str += phy_stats_str+"\n" + # TODO: Remove + # if stamp_valid: + # txstr += f" [b]Stamp[/b] value is {stamp_value} " heading_str += "[b]Sent[/b] "+txstr heading_str += "\n[b]Received[/b] "+rxstr @@ -379,15 +543,39 @@ class Messages(): heading_str += str(attachment[0])+", " heading_str = heading_str[:-2] + if has_audio: + alstr = RNS.prettysize(len(audio_field[1])) + heading_str += f"\n[b]Audio Message[/b] ({alstr})" + item = ListLXMessageCard( text=pre_content+message_markup.decode("utf-8")+extra_content, heading=heading_str, md_bg_color=msg_color, ) + item.lsource = m["source"] + item.has_audio = False if has_attachment: item.attachments_field = attachments_field + if has_audio: + def play_audio(sender): + self.app.play_audio_field(sender.audio_field) + stored_color = sender.md_bg_color + if sender.lsource == self.app.sideband.lxmf_destination.hash: + sender.md_bg_color = mdc(color_delivered, intensity_play) + else: + sender.md_bg_color = mdc(color_received, intensity_play) + + def cb(dt): + sender.md_bg_color = stored_color + Clock.schedule_once(cb, 0.25) + + item.has_audio = True + item.audio_size = len(audio_field[1]) + item.audio_field = audio_field + item.bind(on_release=play_audio) + if image_field != None: item.has_image = True item.image_field = image_field @@ -480,6 +668,15 @@ class Messages(): return x + def gen_details(mhash, item): + def x(): + item.dmenu.dismiss() + def cb(dt): + self.message_details_dialog(mhash) + Clock.schedule_once(cb, 0.2) + + return x + def gen_copy(msg, item): def x(): Clipboard.copy(msg) @@ -741,6 +938,14 @@ class Messages(): "height": dp(40), "on_release": gen_retry(m["hash"], m["content"], item) } + + details_item = { + "viewclass": "OneLineListItem", + "text": "Details", + "height": dp(40), + "on_release": gen_details(m["hash"], item) + } + if m["method"] == LXMF.LXMessage.PAPER: if RNS.vendor.platformutils.is_android(): 
qr_save_text = "Share QR Code" @@ -825,6 +1030,7 @@ class Messages(): else: if telemeter != None: dm_items = [ + details_item, { "viewclass": "OneLineListItem", "text": "Copy", @@ -847,6 +1053,7 @@ class Messages(): else: dm_items = [ + details_item, { "viewclass": "OneLineListItem", "text": "Copy", @@ -965,7 +1172,27 @@ MDScreen: icon: "key-wireless" text: "Query Network For Keys" on_release: root.app.key_query_action(self) - + + BoxLayout: + id: message_ptt + padding: [dp(16), dp(8), dp(16), dp(8)] + spacing: dp(24) + size_hint_y: None + height: self.minimum_height + + MDRectangleFlatIconButton: + id: message_ptt_button + icon: "microphone" + text: "PTT" + size_hint_x: 1.0 + padding: [dp(10), dp(13), dp(10), dp(14)] + icon_size: dp(24) + font_size: dp(16) + on_press: root.app.message_ptt_down_action(self) + on_release: root.app.message_ptt_up_action(self) + _no_ripple_effect: True + background_normal: "" + background_down: "" BoxLayout: id: message_input_part diff --git a/sbapp/ui/objectdetails.py b/sbapp/ui/objectdetails.py index 43d9adf..6acb51f 100644 --- a/sbapp/ui/objectdetails.py +++ b/sbapp/ui/objectdetails.py @@ -41,6 +41,7 @@ class ObjectDetails(): self.raw_telemetry = None self.from_telemetry = False self.from_conv = False + self.from_objects = False self.viewing_self = False self.delete_dialog = None @@ -84,6 +85,8 @@ class ObjectDetails(): else: if self.from_conv: self.app.open_conversation(self.object_hash, direction="right") + elif self.from_objects: + self.app.objects_action(direction="right") else: self.app.close_sub_map_action() @@ -117,11 +120,11 @@ class ObjectDetails(): def reload_telemetry(self, sender=None, notoast=False): if self.object_hash != None: - self.set_source(self.object_hash, from_conv=self.from_conv, from_telemetry=self.from_telemetry) + self.set_source(self.object_hash, from_conv=self.from_conv, from_objects=self.from_objects, from_telemetry=self.from_telemetry) if not notoast: toast("Reloaded telemetry for object") - def set_source(self, source_dest, from_conv=False, from_telemetry=False, prefetched=None): + def set_source(self, source_dest, from_conv=False, from_objects=False, from_telemetry=False, prefetched=None): try: self.object_hash = source_dest own_address = self.app.sideband.lxmf_destination.hash @@ -140,6 +143,10 @@ class ObjectDetails(): self.from_conv = True else: self.from_conv = False + if from_objects: + self.from_objects = True + else: + self.from_objects = False self.coords = None self.telemetry_list.data = [] @@ -298,16 +305,29 @@ class RVDetails(MDRecycleView): rendered_telemetry = [] sort = { + "Information": 5, "Physical Link": 10, "Location": 20, "Ambient Light": 30, "Ambient Temperature": 40, "Relative Humidity": 50, "Ambient Pressure": 60, + "Magnetic Field": 61, + "Gravity": 62, + "Angular Velocity": 63, + "Acceleration": 64, + "Proximity": 65, "Battery": 70, - "Timestamp": 80, - "Received": 90, - "Information": 5, + "Processor": 72, + "Random Access Memory": 74, + "Non-Volatile Memory": 76, + "Power Consumption": 80, + "Power Production": 81, + "Tank": 90, + "Fuel": 91, + "Custom": 100, + "Timestamp": 190, + "Received": 200, } def pass_job(sender=None): @@ -316,328 +336,458 @@ class RVDetails(MDRecycleView): self.entries = [] rendered_telemetry.sort(key=lambda s: sort[s["name"]] if s["name"] in sort else 1000) for s in rendered_telemetry: - extra_entries = [] - release_function = pass_job - formatted_values = None - name = s["name"] - if name == "Timestamp": - ts = s["values"]["UTC"] - if ts != None: - ts_str = 
datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S") - formatted_values = f"Recorded [b]{RNS.prettytime(time.time()-ts, compact=True)} ago[/b] ({ts_str})" - def copy_info(e=None): - Clipboard.copy(ts_str) - toast("Copied to clipboard") - release_function = copy_info - elif name == "Information": - info = s["values"]["contents"] - if info != None: - istr = str(info) - def copy_info(e=None): - Clipboard.copy(istr) - toast("Copied to clipboard") - release_function = copy_info - external_text = multilingual_markup(escape_markup(istr).encode("utf-8")).decode("utf-8") - formatted_values = f"[b]Information[/b]: {external_text}" - elif name == "Received": - formatted_values = "" - by = s["values"]["by"]; - via = s["values"]["via"]; - - if by == self.app.sideband.lxmf_destination.hash: - if via == self.delegate.object_hash: - formatted_values = "Collected directly by [b]this device[/b], directly [b]from emitter[/b]" - else: - via_str = self.app.sideband.peer_display_name(via) - if via_str == None: - via_str = "an [b]unknown peer[/b]" - formatted_values = f"Collected directly by [b]this device[/b], via {via_str}" - else: - if via != None and via == by: - vstr = self.app.sideband.peer_display_name(via) - formatted_values = f"Received from, and collected by [b]{vstr}[/b]" - - else: - if via != None: - vstr = self.app.sideband.peer_display_name(via) - via_str = f"Received from [b]{vstr}[/b]" - else: - via_str = "Received from an [b]unknown peer[/b]" - - if by != None: - dstr = self.app.sideband.peer_display_name(by) - by_str = f", collected by [b]{dstr}[/b]" - else: - by_str = f", collected by an [b]unknown peer[/b]" - - formatted_values = f"{via_str}{by_str}" - - if formatted_values == "": - formatted_values = None - - if not by == self.app.sideband.lxmf_destination.hash and not self.app.sideband.is_trusted(by): - extra_entries.append({"icon": "alert", "text": "Collected by a [b]non-trusted[/b] peer"}) + try: + extra_entries = [] + release_function = pass_job + formatted_values = None + name = s["name"] - elif name == "Battery": - p = s["values"]["percent"] - cs = s["values"]["_meta"] - if cs != None: cs_str = f" ({cs})" - if p != None: formatted_values = f"{name} [b]{p}%[/b]"+cs_str - elif name == "Ambient Pressure": - p = s["values"]["mbar"] - if p != None: formatted_values = f"{name} [b]{p} mbar[/b]" - dt = "mbar" - if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: - d = s["deltas"][dt] - formatted_values += f" (Δ = {d} mbar)" - elif name == "Ambient Temperature": - c = s["values"]["c"] - if c != None: formatted_values = f"{name} [b]{c}° C[/b]" - dt = "c" - if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: - d = s["deltas"][dt] - formatted_values += f" (Δ = {d}° C)" - elif name == "Relative Humidity": - r = s["values"]["percent"] - if r != None: formatted_values = f"{name} [b]{r}%[/b]" - dt = "percent" - if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: - d = s["deltas"][dt] - formatted_values += f" (Δ = {d}%)" - elif name == "Physical Link": - rssi = s["values"]["rssi"]; rssi_str = None - snr = s["values"]["snr"]; snr_str = None - q = s["values"]["q"]; q_str = None - if q != None: q_str = f"Link Quality [b]{q}%[/b]" - if rssi != None: - rssi_str = f"RSSI [b]{rssi} dBm[/b]" - if q != None: rssi_str = ", "+rssi_str - if snr != None: - snr_str = f"SNR [b]{snr} dB[/b]" - if q != None or rssi != None: snr_str = ", "+snr_str - if q_str or rssi_str or snr_str: - formatted_values = q_str+rssi_str+snr_str - elif name == "Power Consumption": - cs = 
s["values"] - if cs != None: - for c in cs: - label = c["label"] - watts = c["w"] - prefix = "" - if watts < 1/1e6: - watts *= 1e9 - prefix = "n" - elif watts < 1/1e3: - watts *= 1e6 - prefix = "µ" - elif watts < 1: - watts *= 1e3 - prefix = "m" - elif watts >= 1e15: - watts /= 1e15 - prefix = "E" - elif watts >= 1e12: - watts /= 1e12 - prefix = "T" - elif watts >= 1e9: - watts /= 1e9 - prefix = "G" - elif watts >= 1e6: - watts /= 1e6 - prefix = "M" - elif watts >= 1e3: - watts /= 1e3 - prefix = "K" + if name == "Timestamp": + ts = s["values"]["UTC"] + if ts != None: + ts_str = datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S") + formatted_values = f"Recorded [b]{RNS.prettytime(time.time()-ts, compact=True)} ago[/b] ({ts_str})" + def copy_info(e=None): + Clipboard.copy(ts_str) + toast("Copied to clipboard") + release_function = copy_info + + elif name == "Information": + info = s["values"]["contents"] + if info != None: + istr = str(info) + def copy_info(e=None): + Clipboard.copy(istr) + toast("Copied to clipboard") + release_function = copy_info + external_text = multilingual_markup(escape_markup(istr).encode("utf-8")).decode("utf-8") + formatted_values = f"[b]Information[/b]: {external_text}" + + elif name == "Received": + formatted_values = "" + by = s["values"]["by"]; + via = s["values"]["via"]; - watts = round(watts, 2) - p_text = f"{label} [b]{watts} {prefix}W[/b]" - extra_entries.append({"icon": s["icon"], "text": p_text}) - - elif name == "Power Production": - cs = s["values"] - if cs != None: - for c in cs: - label = c["label"] - watts = c["w"] - prefix = "" - if watts < 1/1e6: - watts *= 1e9 - prefix = "n" - elif watts < 1/1e3: - watts *= 1e6 - prefix = "µ" - elif watts < 1: - watts *= 1e3 - prefix = "m" - elif watts >= 1e15: - watts /= 1e15 - prefix = "E" - elif watts >= 1e12: - watts /= 1e12 - prefix = "T" - elif watts >= 1e9: - watts /= 1e9 - prefix = "G" - elif watts >= 1e6: - watts /= 1e6 - prefix = "M" - elif watts >= 1e3: - watts /= 1e3 - prefix = "K" - - watts = round(watts, 2) - p_text = f"{label} [b]{watts} {prefix}W[/b]" - extra_entries.append({"icon": s["icon"], "text": p_text}) - - elif name == "Location": - lat = s["values"]["latitude"] - lon = s["values"]["longitude"] - alt = s["values"]["altitude"] - speed = s["values"]["speed"] - heading = s["values"]["heading"] - accuracy = s["values"]["accuracy"] - updated = s["values"]["updated"] - updated_str = f", logged [b]{RNS.prettytime(time.time()-updated, compact=True)} ago[/b]" - - coords = f"{lat}, {lon}" - fcoords = f"{round(lat,4)}, {round(lon,4)}" - self.delegate.coords = coords - if alt == 0: - alt_str = "0" - else: - alt_str = RNS.prettydistance(alt) - formatted_values = f"Coordinates [b]{fcoords}[/b], altitude [b]{alt_str}[/b]" - if speed != None: - if speed > 0.02: - speed_formatted_values = f"Speed [b]{speed} Km/h[/b], heading [b]{heading}°[/b]" - else: - # speed_formatted_values = f"Speed [b]0 Km/h[/b]" - speed_formatted_values = f"Object is [b]stationary[/b]" - else: - speed_formatted_values = None - extra_formatted_values = f"Uncertainty [b]{accuracy} meters[/b]"+updated_str - - data = {"icon": s["icon"], "text": f"{formatted_values}"} - - extra_entries.append({"icon": "map-marker-question", "text": extra_formatted_values}) - if speed_formatted_values != None: - extra_entries.append({"icon": "speedometer", "text": speed_formatted_values}) - - if "distance" in s: - if "orthodromic" in s["distance"]: - od = s["distance"]["orthodromic"] - if od != None: - od_text = f"Geodesic distance 
[b]{RNS.prettydistance(od)}[/b]" - extra_entries.append({"icon": "earth", "text": od_text}) - - if "euclidian" in s["distance"]: - ed = s["distance"]["euclidian"] - if ed != None: - ed_text = f"Euclidian distance [b]{RNS.prettydistance(ed)}[/b]" - extra_entries.append({"icon": "axis-arrow", "text": ed_text}) - - if "vertical" in s["distance"]: - vd = s["distance"]["vertical"] - if vd != None: - if vd < 0: - relstr = "lower" - vd = abs(vd) - else: - relstr = "greater" - vd_text = f"Altitude is [b]{RNS.prettydistance(vd)}[/b] {relstr} than this device" - extra_entries.append({"icon": "altimeter", "text": vd_text}) - - if "angle_to_horizon" in s["values"]: - oath = s["values"]["angle_to_horizon"] - if oath != None: - if self.delegate.viewing_self: - oath_text = f"Local horizon is at [b]{round(oath,3)}°[/b]" + if by == self.app.sideband.lxmf_destination.hash: + if via == self.delegate.object_hash: + formatted_values = "Collected directly by [b]this device[/b], directly [b]from emitter[/b]" else: - oath_text = f"Object's horizon is at [b]{round(oath,3)}°[/b]" - extra_entries.append({"icon": "arrow-split-horizontal", "text": oath_text}) - - if self.delegate.viewing_self and "radio_horizon" in s["values"]: - orh = s["values"]["radio_horizon"] - if orh != None: - range_text = RNS.prettydistance(orh) - rh_formatted_text = f"Radio horizon of [b]{range_text}[/b]" - extra_entries.append({"icon": "radio-tower", "text": rh_formatted_text}) - - if "azalt" in s and "local_angle_to_horizon" in s["azalt"]: - lath = s["azalt"]["local_angle_to_horizon"] - if lath != None: - lath_text = f"Local horizon is at [b]{round(lath,3)}°[/b]" - extra_entries.append({"icon": "align-vertical-distribute", "text": lath_text}) - - if "azalt" in s: - azalt_formatted_text = "" - if "azimuth" in s["azalt"]: - az = s["azalt"]["azimuth"] - az_text = f"Azimuth [b]{round(az,3)}°[/b]" - azalt_formatted_text += az_text - - if "altitude" in s["azalt"]: - al = s["azalt"]["altitude"] - al_text = f"altitude [b]{round(al,3)}°[/b]" - if len(azalt_formatted_text) != 0: azalt_formatted_text += ", " - azalt_formatted_text += al_text - - extra_entries.append({"icon": "compass-rose", "text": azalt_formatted_text}) - - if "above_horizon" in s["azalt"]: - astr = "above" if s["azalt"]["above_horizon"] == True else "below" - dstr = str(round(s["azalt"]["altitude_delta"], 3)) - ah_text = f"Object is [b]{astr}[/b] the horizon (Δ = {dstr}°)" - extra_entries.append({"icon": "angle-acute", "text": ah_text}) - - if not self.delegate.viewing_self and "radio_horizon" in s["values"]: - orh = s["values"]["radio_horizon"] - if orh != None: - range_text = RNS.prettydistance(orh) - rh_formatted_text = f"Object's radio horizon is [b]{range_text}[/b]" - extra_entries.append({"icon": "radio-tower", "text": rh_formatted_text}) - - if "radio_horizon" in s: - rh_icon = "circle-outline" - crange_text = RNS.prettydistance(s["radio_horizon"]["combined_range"]) - if s["radio_horizon"]["within_range"]: - rh_formatted_text = f"[b]Within[/b] shared radio horizon of [b]{crange_text}[/b]" - rh_icon = "set-none" + via_str = self.app.sideband.peer_display_name(via) + if via_str == None: + via_str = "an [b]unknown peer[/b]" + formatted_values = f"Collected directly by [b]this device[/b], via {via_str}" else: - rh_formatted_text = f"[b]Outside[/b] shared radio horizon of [b]{crange_text}[/b]" + if via != None and via == by: + vstr = self.app.sideband.peer_display_name(via) + formatted_values = f"Received from, and collected by [b]{vstr}[/b]" + + else: + if via != None: + vstr = 
self.app.sideband.peer_display_name(via) + via_str = f"Received from [b]{vstr}[/b]" + else: + via_str = "Received from an [b]unknown peer[/b]" + + if by != None: + dstr = self.app.sideband.peer_display_name(by) + by_str = f", collected by [b]{dstr}[/b]" + else: + by_str = f", collected by an [b]unknown peer[/b]" + + formatted_values = f"{via_str}{by_str}" + + if formatted_values == "": + formatted_values = None + + if not by == self.app.sideband.lxmf_destination.hash and not self.app.sideband.is_trusted(by): + extra_entries.append({"icon": "alert", "text": "Collected by a [b]non-trusted[/b] peer"}) - extra_entries.append({"icon": rh_icon, "text": rh_formatted_text}) + elif name == "Battery": + p = s["values"]["percent"] + cs = s["values"]["_meta"] + t = None + if "temperature" in s["values"]: + t = s["values"]["temperature"] + if cs != None: + if t != None: + cs_str = f" ({cs}, {t}° C)" + else: + cs_str = f" ({cs})" - def select(e=None): - geo_uri = f"geo:{lat},{lon}" - def lj(): - webbrowser.open(geo_uri) - threading.Thread(target=lj, daemon=True).start() - - release_function = select - else: - formatted_values = f"{name}" - for vn in s["values"]: - v = s["values"][vn] - formatted_values += f" [b]{v} {vn}[/b]" - - dt = vn + if p != None: formatted_values = f"{name} [b]{p}%[/b]"+cs_str + + elif name == "Ambient Pressure": + p = s["values"]["mbar"] + if p != None: formatted_values = f"{name} [b]{p} mbar[/b]" + dt = "mbar" if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: d = s["deltas"][dt] - formatted_values += f" (Δ = {d} {vn})" - - data = None - if formatted_values != None: - if release_function: - data = {"icon": s["icon"], "text": f"{formatted_values}", "on_release": release_function} + formatted_values += f" (Δ = {d} mbar)" + + elif name == "Ambient Temperature": + c = s["values"]["c"] + if c != None: formatted_values = f"{name} [b]{c}° C[/b]" + dt = "c" + if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: + d = s["deltas"][dt] + formatted_values += f" (Δ = {d}° C)" + + elif name == "Relative Humidity": + r = s["values"]["percent"] + if r != None: formatted_values = f"{name} [b]{r}%[/b]" + dt = "percent" + if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: + d = s["deltas"][dt] + formatted_values += f" (Δ = {d}%)" + + elif name == "Physical Link": + rssi = s["values"]["rssi"]; rssi_str = None + snr = s["values"]["snr"]; snr_str = None + q = s["values"]["q"]; q_str = None + if q != None: q_str = f"Link Quality [b]{q}%[/b]" + if rssi != None: + rssi_str = f"RSSI [b]{rssi} dBm[/b]" + if q != None: rssi_str = ", "+rssi_str + if snr != None: + snr_str = f"SNR [b]{snr} dB[/b]" + if q != None or rssi != None: snr_str = ", "+snr_str + if q_str or rssi_str or snr_str: + formatted_values = q_str+rssi_str+snr_str + + elif name == "Power Consumption": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cicon = c["custom_icon"] + watts = c["w"] + prefix = "" + if watts < 1/1e6: + watts *= 1e9 + prefix = "n" + elif watts < 1/1e3: + watts *= 1e6 + prefix = "µ" + elif watts < 1: + watts *= 1e3 + prefix = "m" + elif watts >= 1e15: + watts /= 1e15 + prefix = "E" + elif watts >= 1e12: + watts /= 1e12 + prefix = "T" + elif watts >= 1e9: + watts /= 1e9 + prefix = "G" + elif watts >= 1e6: + watts /= 1e6 + prefix = "M" + elif watts >= 1e3: + watts /= 1e3 + prefix = "K" + + if cicon: + set_icon = cicon + else: + set_icon = s["icon"] + + watts = round(watts, 2) + p_text = f"{label} [b]{watts} {prefix}W[/b]" + 
extra_entries.append({"icon": set_icon, "text": p_text}) + + elif name == "Power Production": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cicon = c["custom_icon"] + watts = c["w"] + prefix = "" + if watts < 1/1e6: + watts *= 1e9 + prefix = "n" + elif watts < 1/1e3: + watts *= 1e6 + prefix = "µ" + elif watts < 1: + watts *= 1e3 + prefix = "m" + elif watts >= 1e15: + watts /= 1e15 + prefix = "E" + elif watts >= 1e12: + watts /= 1e12 + prefix = "T" + elif watts >= 1e9: + watts /= 1e9 + prefix = "G" + elif watts >= 1e6: + watts /= 1e6 + prefix = "M" + elif watts >= 1e3: + watts /= 1e3 + prefix = "K" + + if cicon: + set_icon = cicon + else: + set_icon = s["icon"] + + watts = round(watts, 2) + p_text = f"{label} [b]{watts} {prefix}W[/b]" + extra_entries.append({"icon": set_icon, "text": p_text}) + + elif name == "Custom": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cicon = c["custom_icon"] + value = str(c["value"]) + set_icon = cicon if cicon else s["icon"] + e_text = f"{label} [b]{value}[/b]" + extra_entries.append({"icon": set_icon, "text": e_text}) + + elif name == "Tank": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cicon = c["custom_icon"] + unit = c["unit"] + cap = round(c["capacity"], 1) + lvl = round(c["level"], 1) + free = round(c["free"], 1) + pct = round(c["percent"], 1) + + set_icon = cicon if cicon else s["icon"] + e_text = f"{label} level is [b]{lvl} {unit}[/b] ([b]{pct}%[/b])" + extra_entries.append({"icon": set_icon, "text": e_text}) + + elif name == "Fuel": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cicon = c["custom_icon"] + unit = c["unit"] + cap = round(c["capacity"], 1) + lvl = round(c["level"], 1) + free = round(c["free"], 1) + pct = round(c["percent"], 1) + + set_icon = cicon if cicon else s["icon"] + e_text = f"{label} level is [b]{lvl} {unit}[/b] ([b]{pct}%[/b])" + extra_entries.append({"icon": set_icon, "text": e_text}) + + elif name == "Processor": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + load = c["current_load"] + avgs = c["load_avgs"] + clock = c["clock"] + pct = round(load*100.0, 1) + + avgs_str = f", averages are [b]{round(avgs[0],2)}[/b], [b]{round(avgs[1],2)}[/b], [b]{round(avgs[2],2)}[/b]" if avgs != None and len(avgs) == 3 else "" + clock_str = " at [b]"+RNS.prettyfrequency(clock)+"[/b]" if clock != None else "" + + e_text = f"Using [b]{pct}%[/b] of {label}{clock_str}{avgs_str}" + e_text = f"{label} use is [b]{pct}%[/b]{clock_str}{avgs_str}" + extra_entries.append({"icon": s["icon"], "text": e_text}) + + elif name == "Non-Volatile Memory": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cap = RNS.prettysize(c["capacity"]) + use = RNS.prettysize(c["used"]) + free = RNS.prettysize(c["free"]) + pct = round(c["percent"], 1) + + e_text = f"{label} use is [b]{use}[/b] ([b]{pct}%[/b]) of [b]{cap}[/b], with [b]{free}[/b] free" + extra_entries.append({"icon": s["icon"], "text": e_text}) + + elif name == "Random Access Memory": + cs = s["values"] + if cs != None: + for c in cs: + label = c["label"] + cap = RNS.prettysize(c["capacity"]) + use = RNS.prettysize(c["used"]) + free = RNS.prettysize(c["free"]) + pct = round(c["percent"], 1) + + e_text = f"{label} use is [b]{use}[/b] ([b]{pct}%[/b]) of [b]{cap}[/b], with [b]{free}[/b] free" + extra_entries.append({"icon": s["icon"], "text": e_text}) + + elif name == "Location": + lat = s["values"]["latitude"] + lon = s["values"]["longitude"] + alt = 
s["values"]["altitude"] + speed = s["values"]["speed"] + heading = s["values"]["heading"] + accuracy = s["values"]["accuracy"] + updated = s["values"]["updated"] + updated_str = f", logged [b]{RNS.prettytime(time.time()-updated, compact=True)} ago[/b]" + + coords = f"{lat}, {lon}" + fcoords = f"{round(lat,4)}, {round(lon,4)}" + self.delegate.coords = coords + if alt == 0: + alt_str = "0" + else: + alt_str = RNS.prettydistance(alt) + formatted_values = f"Coordinates [b]{fcoords}[/b], altitude [b]{alt_str}[/b]" + if speed != None: + if speed > 0.02: + speed_formatted_values = f"Speed [b]{speed} Km/h[/b], heading [b]{heading}°[/b]" + else: + # speed_formatted_values = f"Speed [b]0 Km/h[/b]" + speed_formatted_values = f"Object is [b]stationary[/b]" + else: + speed_formatted_values = None + extra_formatted_values = f"Uncertainty [b]{accuracy} meters[/b]"+updated_str + + data = {"icon": s["icon"], "text": f"{formatted_values}"} + + extra_entries.append({"icon": "map-marker-question", "text": extra_formatted_values}) + if speed_formatted_values != None: + extra_entries.append({"icon": "speedometer", "text": speed_formatted_values}) + + if "distance" in s: + if "orthodromic" in s["distance"]: + od = s["distance"]["orthodromic"] + if od != None: + od_text = f"Geodesic distance [b]{RNS.prettydistance(od)}[/b]" + extra_entries.append({"icon": "earth", "text": od_text}) + + if "euclidian" in s["distance"]: + ed = s["distance"]["euclidian"] + if ed != None: + ed_text = f"Euclidian distance [b]{RNS.prettydistance(ed)}[/b]" + extra_entries.append({"icon": "axis-arrow", "text": ed_text}) + + if "vertical" in s["distance"]: + vd = s["distance"]["vertical"] + if vd != None: + if vd < 0: + relstr = "lower" + vd = abs(vd) + else: + relstr = "greater" + vd_text = f"Altitude is [b]{RNS.prettydistance(vd)}[/b] {relstr} than this device" + extra_entries.append({"icon": "altimeter", "text": vd_text}) + + if "angle_to_horizon" in s["values"]: + oath = s["values"]["angle_to_horizon"] + if oath != None: + if self.delegate.viewing_self: + oath_text = f"Local horizon is at [b]{round(oath,3)}°[/b]" + else: + oath_text = f"Object's horizon is at [b]{round(oath,3)}°[/b]" + extra_entries.append({"icon": "arrow-split-horizontal", "text": oath_text}) + + if self.delegate.viewing_self and "radio_horizon" in s["values"]: + orh = s["values"]["radio_horizon"] + if orh != None: + range_text = RNS.prettydistance(orh) + rh_formatted_text = f"Radio horizon of [b]{range_text}[/b]" + extra_entries.append({"icon": "radio-tower", "text": rh_formatted_text}) + + if "azalt" in s and "local_angle_to_horizon" in s["azalt"]: + lath = s["azalt"]["local_angle_to_horizon"] + if lath != None: + lath_text = f"Local horizon is at [b]{round(lath,3)}°[/b]" + extra_entries.append({"icon": "align-vertical-distribute", "text": lath_text}) + + if "azalt" in s: + azalt_formatted_text = "" + if "azimuth" in s["azalt"]: + az = s["azalt"]["azimuth"] + az_text = f"Azimuth [b]{round(az,3)}°[/b]" + azalt_formatted_text += az_text + + if "altitude" in s["azalt"]: + al = s["azalt"]["altitude"] + al_text = f"altitude [b]{round(al,3)}°[/b]" + if len(azalt_formatted_text) != 0: azalt_formatted_text += ", " + azalt_formatted_text += al_text + + extra_entries.append({"icon": "compass-rose", "text": azalt_formatted_text}) + + if "above_horizon" in s["azalt"]: + astr = "above" if s["azalt"]["above_horizon"] == True else "below" + dstr = str(round(s["azalt"]["altitude_delta"], 3)) + ah_text = f"Object is [b]{astr}[/b] the horizon (Δ = {dstr}°)" + 
extra_entries.append({"icon": "angle-acute", "text": ah_text}) + + if not self.delegate.viewing_self and "radio_horizon" in s["values"]: + orh = s["values"]["radio_horizon"] + if orh != None: + range_text = RNS.prettydistance(orh) + rh_formatted_text = f"Object's radio horizon is [b]{range_text}[/b]" + extra_entries.append({"icon": "radio-tower", "text": rh_formatted_text}) + + if "radio_horizon" in s: + rh_icon = "circle-outline" + crange_text = RNS.prettydistance(s["radio_horizon"]["combined_range"]) + if s["radio_horizon"]["within_range"]: + rh_formatted_text = f"[b]Within[/b] shared radio horizon of [b]{crange_text}[/b]" + rh_icon = "set-none" + else: + rh_formatted_text = f"[b]Outside[/b] shared radio horizon of [b]{crange_text}[/b]" + + extra_entries.append({"icon": rh_icon, "text": rh_formatted_text}) + + def select(e=None): + geo_uri = f"geo:{lat},{lon}" + def lj(): + webbrowser.open(geo_uri) + threading.Thread(target=lj, daemon=True).start() + + release_function = select else: - data = {"icon": s["icon"], "text": f"{formatted_values}", "on_release": pass_job} + formatted_values = f"{name}" + for vn in s["values"]: + v = s["values"][vn] + formatted_values += f" [b]{v} {vn}[/b]" - if data != None: - self.entries.append(data) - for extra in extra_entries: - self.entries.append(extra) + dt = vn + if "deltas" in s and dt in s["deltas"] and s["deltas"][dt] != None: + d = s["deltas"][dt] + formatted_values += f" (Δ = {d} {vn})" + formatted_values += ", " + formatted_values = formatted_values[:-2] + + data = None + if formatted_values != None: + if release_function: + data = {"icon": s["icon"], "text": f"{formatted_values}", "on_release": release_function} + else: + data = {"icon": s["icon"], "text": f"{formatted_values}", "on_release": pass_job} + + if data != None: + self.entries.append(data) + for extra in extra_entries: + self.entries.append(extra) + + except Exception as e: + RNS.log("An error ocurred while displaying telemetry for object", RNS.LOG_ERROR) + RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR) + RNS.trace_exception(e) + + try: + ratchet_id = RNS.Identity.current_ratchet_id(self.delegate.object_hash) + if ratchet_id: + self.entries.append({"icon": "lock-check-outline", "text": f"Using ratchet [b]{RNS.prettyhexrep(ratchet_id)}[/b]", "on_release": pass_job}) + + except Exception as e: + RNS.trace_exception(e) try: nh = RNS.Transport.hops_to(self.delegate.object_hash) @@ -663,6 +813,23 @@ class RVDetails(MDRecycleView): except Exception as e: RNS.trace_exception(e) + try: + ticket_expires = self.delegate.app.sideband.message_router.get_outbound_ticket_expiry(self.delegate.object_hash) + stamp_cost = self.delegate.app.sideband.message_router.get_outbound_stamp_cost(self.delegate.object_hash) + t_str = "" + if ticket_expires: + t_str = ", but have ticket" + if stamp_cost: + self.entries.append({"icon": "postage-stamp", "text": f"Required stamp cost [b]{stamp_cost}[/b]"+t_str, "on_release": pass_job}) + else: + self.entries.append({"icon": "postage-stamp", "text": f"No required stamp cost", "on_release": pass_job}) + if ticket_expires: + valid_for = ticket_expires - time.time() + self.entries.append({"icon": "ticket-confirmation", "text": f"Delivery ticket valid for [b]{RNS.prettytime(valid_for)}[/b]", "on_release": pass_job}) + + except Exception as e: + RNS.trace_exception(e) + if len(self.entries) == 0: self.entries.append({"icon": "timeline-question-outline", "text": f"No telemetry available for this device"}) diff --git a/setup.py b/setup.py index 
eacbc77..518fd2d 100644 --- a/setup.py +++ b/setup.py @@ -47,6 +47,13 @@ def glob_paths(pattern): return out_files +packages = setuptools.find_packages( + exclude=[ + "sbapp.plyer.platforms.android", + "sbapp.kivymd.tools", + "sbapp.kivymd.tools.*" + ]) + package_data = { "": [ "assets/*", @@ -71,7 +78,7 @@ setuptools.setup( long_description=long_description, long_description_content_type="text/markdown", url="https://unsigned.io/sideband", - packages=setuptools.find_packages(), + packages=packages, package_data=package_data, include_package_data=True, classifiers=[ @@ -88,9 +95,21 @@ setuptools.setup( 'sideband=sbapp:main.run', ] }, - install_requires=["rns>=0.7.5", "lxmf>=0.4.3", "kivy>=2.3.0", "plyer", "pillow>=10.2.0", "qrcode", "materialyoucolor>=2.0.7"], - extras_require={ - "macos": ["pyobjus"], - }, + install_requires=[ + "rns>=0.7.7", + "lxmf>=0.5.1", + "kivy>=2.3.0", + "pillow>=10.2.0", + "qrcode", + "materialyoucolor>=2.0.7", + "ffpyplayer", + "sh", + "numpy<=1.26.4", + "pycodec2;platform_system!='Windows'", + "pyaudio;sys_platform=='linux'", + "pyobjus;sys_platform=='darwin'", + "pyogg;sys_platform=='darwin'", + "pyogg;platform_system=='Windows'", + ], python_requires='>=3.7', )
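
The audio-related dependencies added to `install_requires` above are gated with PEP 508 environment markers, so each platform only pulls in the codec and playback backends it can use. As a quick sanity check of which markers match a given interpreter, a sketch like the following can evaluate them directly. It assumes the `packaging` library is importable (it is wherever a modern pip or setuptools is present), and the `conditional_requirements` mapping is a hand-copied illustration, not part of `setup.py`:

```python
# Hypothetical helper, not part of setup.py: evaluates PEP 508 markers
# mirroring the platform-gated audio dependencies listed above.
from packaging.markers import Marker

conditional_requirements = {
    "pycodec2": "platform_system != 'Windows'",
    "pyaudio": "sys_platform == 'linux'",
    "pyobjus": "sys_platform == 'darwin'",
    "pyogg": "sys_platform == 'darwin' or platform_system == 'Windows'",
}

for package, marker in conditional_requirements.items():
    selected = Marker(marker).evaluate()
    print(f"{package}: {'selected' if selected else 'skipped'} on this platform")
```

This only mirrors the requirement strings for inspection; the authoritative selection is whatever pip resolves from the package metadata at install time.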