diff --git a/README.md b/README.md index 70a4e11..0063719 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ - Copyright: (C) Qianqian Fang (2019-2024) - License: Apache License, Version 2.0 -- Version: 0.5.3 +- Version: 0.5.5 - URL: https://github.com/NeuroJSON/pyjdata ![Build Status](https://github.com/NeuroJSON/pyjdata/actions/workflows/run_test.yml/badge.svg) diff --git a/jdata/__init__.py b/jdata/__init__.py index 62e1d5c..d39ab89 100644 --- a/jdata/__init__.py +++ b/jdata/__init__.py @@ -33,25 +33,42 @@ to restore the original data types """ -from .jfile import load, save, show, loadt, savet, loadb, saveb, jext +from .jfile import ( + load, + save, + show, + loadt, + savet, + loadts, + loadbs, + loadb, + saveb, + jsoncache, + jdlink, + jext, +) from .jdata import encode, decode, jdtype, jsonfilter -__version__ = "0.5.3" +__version__ = "0.5.5" __all__ = [ "load", "save", "show", "loadt", "savet", + "loadts", + "loadbs", "loadb", "saveb", "encode", "decode", + "jsoncache", + "jdlink", "jdtype", "jsonfilter", "jext", ] -__license__ = """Apache license 2.0, Copyright (c) 2019-2022 Qianqian Fang""" +__license__ = """Apache license 2.0, Copyright (c) 2019-2024 Qianqian Fang""" if __name__ == "__main__": diff --git a/jdata/__main__.py b/jdata/__main__.py index c25cb2d..4960813 100644 --- a/jdata/__main__.py +++ b/jdata/__main__.py @@ -21,7 +21,9 @@ def main(): # get arguments and invoke the conversion routines # - parser = argparse.ArgumentParser(description="Convert a text JSON/JData file to a binary JSON/JData file and vice versa.") + parser = argparse.ArgumentParser( + description="Convert a text JSON/JData file to a binary JSON/JData file and vice versa." 
+ ) parser.add_argument( "file", diff --git a/jdata/jdata.py b/jdata/jdata.py index d130d90..18db991 100644 --- a/jdata/jdata.py +++ b/jdata/jdata.py @@ -2,7 +2,7 @@ Encoding and decoding python native data structures as portable JData-spec annotated dict structure -Copyright (c) 2019-2022 Qianqian Fang +Copyright (c) 2019-2024 Qianqian Fang """ __all__ = ["encode", "decode", "jdtype", "jsonfilter"] @@ -15,6 +15,9 @@ import copy import zlib import base64 +import os +import re +from .jfile import jdlink ##==================================================================================== ## global variables @@ -131,12 +134,19 @@ def encode(d, opt={}): return newobj elif isinstance(d, np.ndarray) or np.iscomplex(d): newobj = {} - newobj["_ArrayType_"] = jdtype[str(d.dtype)] if (str(d.dtype) in jdtype) else str(d.dtype) + newobj["_ArrayType_"] = ( + jdtype[str(d.dtype)] if (str(d.dtype) in jdtype) else str(d.dtype) + ) if np.isscalar(d): newobj["_ArraySize_"] = 1 else: newobj["_ArraySize_"] = list(d.shape) - if d.dtype == np.complex64 or d.dtype == np.complex128 or d.dtype == np.csingle or d.dtype == np.cdouble: + if ( + d.dtype == np.complex64 + or d.dtype == np.complex128 + or d.dtype == np.csingle + or d.dtype == np.cdouble + ): newobj["_ArrayIsComplex_"] = True newobj["_ArrayData_"] = np.stack((d.ravel().real, d.ravel().imag)) else: @@ -158,15 +168,23 @@ def encode(d, opt={}): newobj["_ArrayZipData_"] = gzipper.compress(newobj["_ArrayZipData_"]) elif opt["compression"] == "lzma": try: - newobj["_ArrayZipData_"] = lzma.compress(newobj["_ArrayZipData_"], lzma.FORMAT_ALONE) + newobj["_ArrayZipData_"] = lzma.compress( + newobj["_ArrayZipData_"], lzma.FORMAT_ALONE + ) except Exception: - print('you must install "lzma" module to compress with this format, ignoring') + print( + 'you must install "lzma" module to compress with this format, ignoring' + ) pass elif opt["compression"] == "lz4": try: - newobj["_ArrayZipData_"] = 
lz4.frame.compress(newobj["_ArrayZipData_"].tobytes()) + newobj["_ArrayZipData_"] = lz4.frame.compress( + newobj["_ArrayZipData_"].tobytes() + ) except ImportError: - print('you must install "lz4" module to compress with this format, ignoring') + print( + 'you must install "lz4" module to compress with this format, ignoring' + ) pass elif opt["compression"].startswith("blosc2"): try: @@ -187,9 +205,13 @@ def encode(d, opt={}): nthreads=blosc2nthread, ) except ImportError: - print('you must install "blosc2" module to compress with this format, ignoring') + print( + 'you must install "blosc2" module to compress with this format, ignoring' + ) pass - if (("base64" in opt) and (opt["base64"])) or opt["compression"] == "base64": + if (("base64" in opt) and (opt["base64"])) or opt[ + "compression" + ] == "base64": newobj["_ArrayZipData_"] = base64.b64encode(newobj["_ArrayZipData_"]) newobj.pop("_ArrayData_") return newobj @@ -214,8 +236,14 @@ def decode(d, opt={}): """ opt.setdefault("inplace", False) - - if (isinstance(d, str) or type(d) == "unicode") and len(d) <= 6 and len(d) > 4 and d[-1] == "_": + opt.setdefault("maxlinklevel", 0) + + if ( + (isinstance(d, str) or type(d) == "unicode") + and len(d) <= 6 + and len(d) > 4 + and d[-1] == "_" + ): if d == "_NaN_": return float("nan") elif d == "_Inf_": @@ -233,12 +261,16 @@ def decode(d, opt={}): d["_ArraySize_"] = np.frombuffer(bytearray(d["_ArraySize_"])) if "_ArrayZipData_" in d: newobj = d["_ArrayZipData_"] - if (("base64" in opt) and (opt["base64"])) or ("_ArrayZipType_" in d and d["_ArrayZipType_"] == "base64"): + if (("base64" in opt) and (opt["base64"])) or ( + "_ArrayZipType_" in d and d["_ArrayZipType_"] == "base64" + ): newobj = base64.b64decode(newobj) if "_ArrayZipType_" in d and d["_ArrayZipType_"] not in _zipper: raise Exception( "JData", - "compression method {} is not supported".format(d["_ArrayZipType_"]), + "compression method {} is not supported".format( + d["_ArrayZipType_"] + ), ) if 
d["_ArrayZipType_"] == "zlib": newobj = zlib.decompress(bytes(newobj)) @@ -258,7 +290,9 @@ def decode(d, opt={}): newobj = lz4.frame.decompress(bytes(newobj)) except Exception: - print('Warning: you must install "lz4" module to decompress a data record in this file, ignoring') + print( + 'Warning: you must install "lz4" module to decompress a data record in this file, ignoring' + ) return copy.deepcopy(d) if opt["inplace"] else d elif d["_ArrayZipType_"].startswith("blosc2"): try: @@ -267,15 +301,23 @@ def decode(d, opt={}): blosc2nthread = 1 if "nthread" in opt: blosc2nthread = opt["nthread"] - newobj = blosc2.decompress2(bytes(newobj), as_bytearray=False, nthreads=blosc2nthread) + newobj = blosc2.decompress2( + bytes(newobj), as_bytearray=False, nthreads=blosc2nthread + ) except Exception: - print('Warning: you must install "blosc2" module to decompress a data record in this file, ignoring') + print( + 'Warning: you must install "blosc2" module to decompress a data record in this file, ignoring' + ) return copy.deepcopy(d) if opt["inplace"] else d - newobj = np.frombuffer(bytearray(newobj), dtype=np.dtype(d["_ArrayType_"])).reshape(d["_ArrayZipSize_"]) + newobj = np.frombuffer( + bytearray(newobj), dtype=np.dtype(d["_ArrayType_"]) + ).reshape(d["_ArrayZipSize_"]) if "_ArrayIsComplex_" in d and newobj.shape[0] == 2: newobj = newobj[0] + 1j * newobj[1] if "_ArrayOrder_" in d and ( - d["_ArrayOrder_"].lower() == "c" or d["_ArrayOrder_"].lower() == "col" or d["_ArrayOrder_"].lower() == "column" + d["_ArrayOrder_"].lower() == "c" + or d["_ArrayOrder_"].lower() == "col" + or d["_ArrayOrder_"].lower() == "column" ): newobj = newobj.reshape(d["_ArraySize_"], order="F") else: @@ -285,17 +327,25 @@ def decode(d, opt={}): return newobj elif "_ArrayData_" in d: if isinstance(d["_ArrayData_"], str): - newobj = np.frombuffer(d["_ArrayData_"], dtype=np.dtype(d["_ArrayType_"])) + newobj = np.frombuffer( + d["_ArrayData_"], dtype=np.dtype(d["_ArrayType_"]) + ) else: - newobj = 
np.asarray(d["_ArrayData_"], dtype=np.dtype(d["_ArrayType_"])) + newobj = np.asarray( + d["_ArrayData_"], dtype=np.dtype(d["_ArrayType_"]) + ) if "_ArrayZipSize_" in d and newobj.shape[0] == 1: if isinstance(d["_ArrayZipSize_"], str): - d["_ArrayZipSize_"] = np.frombuffer(bytearray(d["_ArrayZipSize_"])) + d["_ArrayZipSize_"] = np.frombuffer( + bytearray(d["_ArrayZipSize_"]) + ) newobj = newobj.reshape(d["_ArrayZipSize_"]) if "_ArrayIsComplex_" in d and newobj.shape[0] == 2: newobj = newobj[0] + 1j * newobj[1] if "_ArrayOrder_" in d and ( - d["_ArrayOrder_"].lower() == "c" or d["_ArrayOrder_"].lower() == "col" or d["_ArrayOrder_"].lower() == "column" + d["_ArrayOrder_"].lower() == "c" + or d["_ArrayOrder_"].lower() == "col" + or d["_ArrayOrder_"].lower() == "column" ): newobj = newobj.reshape(d["_ArraySize_"], order="F") else: @@ -308,6 +358,33 @@ def decode(d, opt={}): "JData", "one and only one of _ArrayData_ or _ArrayZipData_ is required", ) + elif "_DataLink_" in d: + if opt["maxlinklevel"] > 0 and "_DataLink_" in d: + if isinstance(d["_DataLink_"], str): + datalink = d["_DataLink_"] + if re.search("\:\$", datalink): + ref = re.search( + "^(?P<proto>[a-zA-Z]+://)*(?P<path>.+)(?P<delim>\:)()*(?P<jsonpath>(?<=:)\$\d*\.*.*)*", + datalink, + ) + else: + ref = re.search( + "^(?P<proto>[a-zA-Z]+://)*(?P<path>.+)(?P<delim>\:)*(?P<jsonpath>(?<=:)\$\d*\..*)*", + datalink, + ) + if ref and ref.group("path"): + uripath = ref.group("proto") + ref.group("path") + newobj, fname = jdlink(uripath) + if os.path.exists(fname): + opt["maxlinklevel"] = opt["maxlinklevel"] - 1 + if ref.group("jsonpath"): + newobj = jsonpath(newobj, ref.group("jsonpath")) + return newobj + else: + raise Exception( + "JData", + "_DataLink_ contains invalid URL", + ) return decodedict(d, opt) else: return copy.deepcopy(d) if opt["inplace"] else d diff --git a/jdata/jfile.py b/jdata/jfile.py index 2c9e61e..2cb94b1 100644 --- a/jdata/jfile.py +++ b/jdata/jfile.py @@ -1,10 +1,23 @@ """@package docstring File IO to load/decode JData-based files to Python 
data or encode/save Python data to JData files -Copyright (c) 2019-2022 Qianqian Fang +Copyright (c) 2019-2024 Qianqian Fang """ -__all__ = ["load", "save", "show", "loadt", "savet", "loadb", "saveb", "jext"] +__all__ = [ + "load", + "save", + "show", + "loadt", + "savet", + "loadts", + "loadbs", + "loadb", + "saveb", + "jsoncache", + "jdlink", + "jext", +] ##==================================================================================== ## dependent libraries @@ -12,7 +25,11 @@ import json import os +import re import jdata as jd +import urllib.request +from hashlib import sha256 +from sys import platform from collections import OrderedDict ##==================================================================================== @@ -20,8 +37,8 @@ ##==================================================================================== jext = { - "t": [".json", ".jdt", ".jdat", ".jnii", ".jmsh", ".jnirs"], - "b": [".ubj", ".bjd", ".jdb", ".jbat", ".bnii", ".bmsh", ".jamm", ".bnirs"], + "t": [".json", ".jdt", ".jdat", ".jnii", ".jmsh", ".jnirs", ".jbids"], + "b": [".ubj", ".bjd", ".jdb", ".jbat", ".bnii", ".bmsh", ".pmat", ".bnirs"], } ##==================================================================================== @@ -35,6 +52,10 @@ def load(fname, opt={}, **kwargs): @param[in] fname: a JData file name (accept .json,.jdat,.jbat,.jnii,.bnii,.jmsh,.bmsh) @param[in] opt: options, if opt['decode']=True or 1 (default), call jdata.decode() after loading """ + if re.match("^https*://", fname): + newdata = downloadlink(fname, opt, **kwargs) + return newdata + spl = os.path.splitext(fname) ext = spl[1].lower() @@ -45,7 +66,11 @@ def load(fname, opt={}, **kwargs): else: raise Exception( "JData", - "file extension is not recognized, accept (" + ",".join(jext["t"]) + ";" + ",".join(jext["b"]) + ")", + "file extension is not recognized, accept (" + + ",".join(jext["t"]) + + ";" + + ",".join(jext["b"]) + + ")", ) @@ -69,7 +94,11 @@ def save(data, fname, opt={}, **kwargs): 
else: raise Exception( "JData", - "file extension is not recognized, accept (" + ",".join(jext["t"]) + ";" + ",".join(jext["b"]) + ")", + "file extension is not recognized, accept (" + + ",".join(jext["t"]) + + ";" + + ",".join(jext["b"]) + + ")", ) @@ -117,6 +146,53 @@ def savet(data, fname, opt={}, **kwargs): json.dump(data, fid, **kwargs) +##==================================================================================== +## In-memory buffer Parse and dump +##==================================================================================== + + +def loadts(bytes, opt={}, **kwargs): + """@brief Loading a text-based (JSON) JData string buffer and decode it to native Python data + + @param[in] bytes: a JSON string or byte-stream + @param[in] opt: options, if opt['decode']=True or 1 (default), call jdata.decode() after loading + """ + kwargs.setdefault("strict", False) + kwargs.setdefault("object_pairs_hook", OrderedDict) + opt.setdefault("decode", True) + opt.setdefault("inplace", True) + opt["base64"] = True + + data = json.loads(bytes, **kwargs) + + if opt["decode"]: + data = jd.decode(data, opt) + return data + + +def loadbs(bytes, opt={}, **kwargs): + """@brief Loading a binary-JSON/BJData string buffer and decode it to native Python data + + @param[in] bytes: a BJData byte-buffer or byte-stream + @param[in] opt: options, if opt['decode']=True or 1 (default), call jdata.decode() after loading + """ + opt.setdefault("decode", True) + opt.setdefault("inplace", True) + opt["base64"] = False + + try: + import bjdata + except ImportError: + raise ImportError( + 'To read/write binary JData files, you must install the bjdata module by "pip install bjdata"' + ) + else: + data = bjdata.loadb(bytes, **kwargs) + if opt["decode"]: + data = jd.decode(data, opt) + return data + + def show(data, opt={}, **kwargs): """@brief Printing a python data as JSON string or return the JSON string (opt['string']=True) @@ -159,7 +235,9 @@ def loadb(fname, opt={}, **kwargs): try: 
import bjdata except ImportError: - raise ImportError('To read/write binary JData files, you must install the bjdata module by "pip install bjdata"') + raise ImportError( + 'To read/write binary JData files, you must install the bjdata module by "pip install bjdata"' + ) else: with open(fname, "rb") as fid: data = bjdata.load(fid, **kwargs) @@ -181,7 +259,9 @@ def saveb(data, fname, opt={}, **kwargs): try: import bjdata except ImportError: - raise ImportError('To read/write binary JData files, you must install the bjdata module by "pip install bjdata"') + raise ImportError( + 'To read/write binary JData files, you must install the bjdata module by "pip install bjdata"' + ) else: if opt["encode"]: data = jd.encode(data, opt) @@ -190,5 +270,196 @@ def saveb(data, fname, opt={}, **kwargs): ##==================================================================================== -## helper functions +## Handling externally linked data files ##==================================================================================== + + +def jsoncache(url, opt={}, **kwargs): + """@brief Printing the local folder and file name where a linked data file in the URL to be saved + + @param[in] url: a URL + @param[in] opt: options, if opt['decode']=True or 1 (default), call jdata.decode() before saving + """ + + pathname = os.getenv("HOME") + cachepath = [os.path.join(os.getcwd(), ".neurojson")] + dbname, docname, filename = None, None, None + + if pathname != os.getcwd(): + cachepath.append(os.path.join(pathname, ".neurojson")) + + if platform == "win32": + cachepath.append(os.path.join(os.getenv("PROGRAMDATA"), "neurojson")) + elif platform == "darwin": + cachepath.append(os.path.join(pathname, "Library/neurojson")) + cachepath.append("/Library/neurojson") + else: + cachepath.append(os.path.join(pathname, ".cache/neurojson")) + cachepath.append("/var/cache/neurojson") + + if ( + isinstance(url, list) or isinstance(url, tuple) or isinstance(url, frozenset) + ) and len(url) < 4: + 
domain = "default" + + if isinstance(url, str): + link = url + if re.match("^file://", link) or not re.search("://", link): + filename = re.sub("^file://", "", link) + if os.path.isfile(filename): + cachepath = filename + filename = True + return cachepath, filename + + else: + if re.match("^https*://neurojson.org/io/", link): + domain = "io" + else: + newdomain = re.sub("^(https*|ftp)://([^\/?#:]+).*$", r"\2", link) + if newdomain: + domain = newdomain + + dbname = re.search("(?<=db=)[^&]+", link) + docname = re.search("(?<=doc=)[^&]+", link) + filename = re.search("(?<=file=)[^&]+", link) + if dbname: + dbname = dbname.group(0) + if docname: + docname = docname.group(0) + if filename: + filename = filename.group(0) + + if not filename and domain == "neurojson.io": + ref = re.search( + "^(https*|ftp)://neurojson.io(:\d+)*(?P<dbname>/[^\/]+)(?P<docname>/[^\/]+)(?P<filename>/[^\/?]+)*", + link, + ) + if ref: + if ref.group("dbname"): + dbname = ref.group("dbname")[1:] + if ref.group("docname"): + docname = ref.group("docname")[1:] + if ref.group("filename"): + filename = ref.group("filename")[1:] + elif dbname: + if docname: + filename = docname + ".json" + else: + filename = dbname + ".json" + + if not filename: + filename = sha256(link.encode("utf-8")).hexdigest() + suffix = re.search("\.\w{1,5}(?=([#&].*)*$)", link) + if not suffix: + suffix = "" + else: + suffix = suffix.group(0) + filename = filename + suffix + if not dbname: + dbname = filename[0:2] + if not docname: + docname = filename[2:4] + + p = globals().get("NEUROJSON_CACHE") + if isinstance(url, str) or ( + isinstance(url, list) + or isinstance(url, tuple) + or isinstance(url, frozenset) + and len(url) >= 3 + ): + if p is not None: + cachepath.insert(0, p) + elif dbname and docname: + print([domain, dbname, docname, cachepath]) + cachepath = [os.path.join(x, domain, dbname, docname) for x in cachepath] + if filename is not None: + for i in range(len(cachepath)): + if os.path.exists(os.path.join(cachepath[i], filename)): + 
cachepath = os.path.join(cachepath[i], filename) + filename = True + return cachepath, filename + elif "link" in locals(): + spl = os.path.splitext(link) + ext = spl[1].lower() + filename = fname + ext + if p is not None: + cachepath.pop(1) + else: + cachepath.pop(0) + return cachepath, filename + + +def jdlink(uripath, opt={}, **kwargs): + """@brief Printing the local folder and file name where a linked data file in the URL to be saved + + @param[in] url: a URL + @param[in] opt: options, if opt['decode']=True or 1 (default), call jdata.decode() before saving + """ + + opt.setdefault("showlink", 1) + opt.setdefault("showsize", 1) + + if isinstance(uripath, list): + if "regex" in opt: + haspattern = [ + True if re.search(opt["regex"], x) is None else False for x in uripath + ] + uripath = [x for i, x in enumerate(uripath) if haspattern[i]] + if "showsize" in opt: + totalsize = 0 + nosize = 0 + for i in range(len(uripath)): + filesize = re.findall(r"&size=(\d+)", uripath[i]) + if filesize and filesize[0]: + totalsize += int(filesize[0]) + else: + nosize += 1 + print( + "total {} links, {} bytes, {} files with unknown size".format( + len(uripath), totalsize, nosize + ) + ) + alloutput = [[] for _ in range(3)] + for i in range(len(uripath)): + newdata, fname, cachepath = downloadlink(uripath[i], opt) + alloutput[0].append(newdata) + alloutput[1].append(fname) + alloutput[2].append(cachepath) + if len(uripath) == 1: + alloutput = [x[0] for x in alloutput] + newdata, fname, cachepath = tuple(alloutput) + elif isinstance(uripath, str): + newdata, fname, cachepath = downloadlink(uripath, opt) + return newdata, fname + + +def downloadlink(uripath, opt={}): + opt.setdefault("showlink", 1) + + newdata = [] + cachepath, filename = jsoncache(uripath) + if isinstance(cachepath, list) and cachepath: + if opt["showlink"]: + print("downloading from URL:", uripath) + fname = os.path.join(cachepath[0], filename) + fpath = os.path.dirname(fname) + if not os.path.exists(fpath): + 
os.makedirs(fpath) + + rawdata = urllib.request.urlopen(uripath).read() + with open(fname, "wb") as fid: + fid.write(rawdata) + spl = os.path.splitext(fname) + ext = spl[1].lower() + if ext in jext["t"] or ext in jext["b"]: + newdata = jd.load(fname, opt) + + elif not isinstance(cachepath, list) and os.path.exists(cachepath): + if opt["showlink"]: + print("loading from cache:", cachepath) + fname = cachepath + spl = os.path.splitext(fname) + ext = spl[1].lower() + if ext in jext["t"] or ext in jext["b"]: + newdata = jd.load(fname, opt) + return newdata, fname, cachepath diff --git a/setup.py b/setup.py index 3dc44d4..3e79e64 100644 --- a/setup.py +++ b/setup.py @@ -4,38 +4,47 @@ readme = fh.read() setup( - name = 'jdata', - packages = ['jdata'], - version = '0.5.3', - license='Apache license 2.0', - description = 'Encoding and decoding Python data structrues using portable JData-annotated formats', - long_description=readme, - long_description_content_type="text/markdown", - author = 'Qianqian Fang', - author_email = 'fangqq@gmail.com', - maintainer= 'Qianqian Fang', - url = 'https://github.com/NeuroJSON/pyjdata', - download_url = 'https://github.com/NeuroJSON/pyjdata/archive/v0.5.3.tar.gz', - keywords = ['JSON', 'JData', 'UBJSON', 'BJData', 'OpenJData', 'NeuroJSON', 'JNIfTI', 'JMesh', 'Encoder', 'Decoder'], - platforms="any", - install_requires=[ - 'numpy>=1.8.0' - ], - classifiers=[ - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'Topic :: Software Development :: Build Tools', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Topic :: Software 
Development :: Libraries', - 'Topic :: Software Development :: Libraries :: Python Modules' - ] + name="jdata", + packages=["jdata"], + version="0.5.5", + license="Apache license 2.0", + description="Encoding and decoding Python data structrues using portable JData-annotated formats", + long_description=readme, + long_description_content_type="text/markdown", + author="Qianqian Fang", + author_email="fangqq@gmail.com", + maintainer="Qianqian Fang", + url="https://github.com/NeuroJSON/pyjdata", + download_url="https://github.com/NeuroJSON/pyjdata/archive/v0.5.5.tar.gz", + keywords=[ + "JSON", + "JData", + "UBJSON", + "BJData", + "OpenJData", + "NeuroJSON", + "JNIfTI", + "JMesh", + "Encoder", + "Decoder", + ], + platforms="any", + install_requires=["numpy>=1.8.0"], + classifiers=[ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Topic :: Software Development :: Build Tools", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + ], ) diff --git a/test/benchcodecs.py b/test/benchcodecs.py index b37cd1c..cf5a149 100644 --- a/test/benchcodecs.py +++ b/test/benchcodecs.py @@ -9,7 +9,19 @@ print("jdata version:" + jd.__version__) -codecs = ["npy", "npz", "bjd", "zlib", "lzma", "lz4", "blosc2blosclz", "blosc2lz4", "blosc2lz4hc", "blosc2zlib", "blosc2zstd"] +codecs = [ + "npy", + "npz", + "bjd", + "zlib", + "lzma", + "lz4", + "blosc2blosclz", + "blosc2lz4", + "blosc2lz4hc", + "blosc2zlib", + "blosc2zstd", +] nthread = 8 diff --git a/test/testjd.py 
b/test/testjd.py index 0e11870..018558a 100644 --- a/test/testjd.py +++ b/test/testjd.py @@ -42,7 +42,13 @@ def test_module(self): print("== JData-annotated data exported to JSON with zlib compression ==") newdata = data.copy() - print(jd.show(jd.encode(newdata, {"compression": "zlib", "base64": True}), indent=4, default=jd.jsonfilter)) + print( + jd.show( + jd.encode(newdata, {"compression": "zlib", "base64": True}), + indent=4, + default=jd.jsonfilter, + ) + ) print("== Decoding a JData-encoded data and printed in JSON format ==") newdata = data.copy()