From 49c9c0be657b09c74afd8570510430527a5da52e Mon Sep 17 00:00:00 2001
From: NekoAria <990879119@qq.com>
Date: Mon, 14 Mar 2022 19:26:26 +0800
Subject: [PATCH] :art: Improve the structure and code formatting
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Normalize some function, parameter, and variable names
- Fix some typos
- Format the code with `isort` and `Black`
---
 PicImageSearch/Async/__init__.py           |   8 +-
 PicImageSearch/Async/ascii2d.py            |  16 +-
 PicImageSearch/Async/baidu.py              |  50 ++---
 PicImageSearch/Async/google.py             |  33 ++--
 PicImageSearch/Async/iqdb.py               |  25 +--
 PicImageSearch/Async/network.py            |  41 +++--
 PicImageSearch/Async/saucenao.py           |  57 +++---
 PicImageSearch/Async/tracemoe.py           | 206 ++++++++++++---------
 PicImageSearch/Utils/__init__.py           |  10 +-
 PicImageSearch/Utils/ascii2d.py            |  47 +++--
 PicImageSearch/Utils/baidu.py              |  42 +++--
 PicImageSearch/Utils/errors.py             |   2 +-
 PicImageSearch/Utils/google.py             |  52 +++---
 PicImageSearch/Utils/iqdb.py               |  37 ++--
 PicImageSearch/Utils/saucenao.py           | 120 ++++++------
 PicImageSearch/__init__.py                 |  24 ++-
 PicImageSearch/ascii2d.py                  |  40 ++--
 PicImageSearch/baidu.py                    |  69 ++++---
 PicImageSearch/google.py                   |  30 +--
 PicImageSearch/iqdb.py                     |  40 ++--
 PicImageSearch/saucenao.py                 |  77 ++++----
 PicImageSearch/tracemoe.py                 | 211 ++++++++++++----------
 docs/Google/Demo.md                        |  10 +-
 setup.py                                   |  14 +-
 test/Async/{test3.py => ascii2d_test.py}   |  17 +-
 test/Async/{test6.py => baidu_test.py}     |  12 +-
 test/Async/google_test.py                  |  30 +++
 test/Async/{test4.py => iqdb_test.py}      |   6 +-
 test/Async/{test2.py => saucenao_test.py}  |  18 +-
 test/Async/test5.py                        |  23 ---
 test/Async/{test1.py => tracemoe_test.py}  |   5 +-
 test/ascii2d_test.py                       |  18 ++
 test/{test6.py => baidu_test.py}           |   8 +-
 test/google_test.py                        |  27 +++
 test/{test4_3d.py => iqdb_3d_test.py}      |  14 +-
 test/{test4.py => iqdb_test.py}            |   5 +-
 test/{test2.py => saucenao_test.py}        |  17 +-
 test/test3.py                              |  15 --
 test/test5.py                              |  23 ---
 test/{test1.py => tracemoe_test.py}        |   2 +-
 40 files changed, 842 insertions(+), 659 deletions(-)
 rename test/Async/{test3.py => ascii2d_test.py} (52%)
 rename test/Async/{test6.py => baidu_test.py} (60%)
 create mode 100644 test/Async/google_test.py
 rename test/Async/{test4.py => iqdb_test.py} (95%)
 rename test/Async/{test2.py => saucenao_test.py} (56%)
 delete mode 100644 test/Async/test5.py
 rename test/Async/{test1.py => tracemoe_test.py} (88%)
 create mode 100644 test/ascii2d_test.py
 rename test/{test6.py => baidu_test.py} (64%)
 create mode 100644 test/google_test.py
 rename test/{test4_3d.py => iqdb_3d_test.py} (76%)
 rename test/{test4.py => iqdb_test.py} (94%)
 rename test/{test2.py => saucenao_test.py} (50%)
 delete mode 100644 test/test3.py
 delete mode 100644 test/test5.py
 rename test/{test1.py => tracemoe_test.py} (97%)

diff --git a/PicImageSearch/Async/__init__.py b/PicImageSearch/Async/__init__.py
index 18d21212..18f98ed6 100644
--- a/PicImageSearch/Async/__init__.py
+++ b/PicImageSearch/Async/__init__.py
@@ -1,7 +1,7 @@
-from .network import NetWork
-from .tracemoe import AsyncTraceMoe
-from .saucenao import AsyncSauceNAO
 from .ascii2d import AsyncAscii2D
-from .iqdb import AsyncIqdb
 from .baidu import AsyncBaiDu
 from .google import AsyncGoogle
+from .iqdb import AsyncIqdb
+from .network import NetWork
+from .saucenao import AsyncSauceNAO
+from .tracemoe import AsyncTraceMoe
diff --git a/PicImageSearch/Async/ascii2d.py b/PicImageSearch/Async/ascii2d.py
index e3cb326e..e8c2d2fe 100644
--- a/PicImageSearch/Async/ascii2d.py
+++ b/PicImageSearch/Async/ascii2d.py
@@ -1,8 +1,8 @@
 from bs4 import BeautifulSoup
 from loguru import logger
 
-from .network import HandOver
 from ..Utils import Ascii2DResponse, get_error_message
+from .network import HandOver
 
 
 class AsyncAscii2D(HandOver):
@@ -25,8 +25,8 @@ def __init__(self, bovw=False, **requests_kwargs):
 
     @staticmethod
     def _slice(res) -> Ascii2DResponse:
-        soup = BeautifulSoup(res, 'html.parser')
-        resp = soup.find_all(class_='row item-box')
+        soup = BeautifulSoup(res, "html.parser")
+        resp = soup.find_all(class_="row item-box")
         return Ascii2DResponse(resp)
 
     async def search(self, url) -> Ascii2DResponse:
@@ -48,17 +48,17 @@ async def search(self, url) -> Ascii2DResponse:
         • .raw[0].detail = First index of details image that was found
         """
         try:
-            if url[:4] == 'http':  # 网络url
-                ascii2d_url = 'https://ascii2d.net/search/uri'
+            if url[:4] == "http":  # 网络url
+                ascii2d_url = "https://ascii2d.net/search/uri"
                 res = await self.post(ascii2d_url, _data={"uri": url})
             else:  # 是否是本地文件
-                ascii2d_url = 'https://ascii2d.net/search/file'
-                res = await self.post(ascii2d_url, _files={"file": open(url, 'rb')})
+                ascii2d_url = "https://ascii2d.net/search/file"
+                res = await self.post(ascii2d_url, _files={"file": open(url, "rb")})
             if res.status_code == 200:
                 if self.bovw:  # 如果启用bovw选项,第一次请求是向服务器提交文件
-                    res = await self.get(str(res.url).replace('/color/', '/bovw/'))
+                    res = await self.get(str(res.url).replace("/color/", "/bovw/"))
             else:
                 logger.error(res.status_code)
                 logger.error(get_error_message(res.status_code))
diff --git a/PicImageSearch/Async/baidu.py b/PicImageSearch/Async/baidu.py
index d35cc6d8..e455496b 100644
--- a/PicImageSearch/Async/baidu.py
+++ b/PicImageSearch/Async/baidu.py
@@ -1,40 +1,42 @@
 import time
-from .network import HandOver
 from PicImageSearch.Utils import BaiDuResponse
+from .network import HandOver
 
 
-class AsyncBaiDu(HandOver):
-
+class AsyncBaiDu(HandOver):
     def __init__(self, **requests_kwargs):
         super().__init__(**requests_kwargs)
-        self.url = 'https://graph.baidu.com/upload'
+        self.url = "https://graph.baidu.com/upload"
         self.requests_kwargs = requests_kwargs
         self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45'
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45"
         }
 
     async def search(self, url: str) -> BaiDuResponse:
-        params = {
-            'uptime': int(time.time())
-        }
-        if url[:4] == 'http':  # 网络url
-            m = {'image': url,
-                 'range': '{"page_from": "searchIndex"}',
-                 'from': "pc",
-                 'tn': 'pc',
-                 'image_source': 'PC_UPLOAD_MOVE',
-                 'sdkParams': '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}'
-                 }
+        params = {"uptime": int(time.time())}
+        if url[:4] == "http":  # 网络url
+            m = {
+                "image": url,
+                "range": '{"page_from": "searchIndex"}',
+                "from": "pc",
+                "tn": "pc",
+                "image_source": "PC_UPLOAD_MOVE",
+                "sdkParams": '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}',
+            }
         else:  # 文件
-            m = {'image': ('filename', open(url, 'rb')),
-                 'range': '{"page_from": "searchIndex"}',
-                 'from': "pc",
-                 'tn': 'pc',
-                 'image_source': 'PC_UPLOAD_SEARCH_FILE',
-                 'sdkParams': '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}'
'{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}' - } - res = await self.post(self.url, _headers=self.headers, _params=params, _data=m) # 上传文件 - url = res.json()['data']['url'] + m = { + "image": ("filename", open(url, "rb")), + "range": '{"page_from": "searchIndex"}', + "from": "pc", + "tn": "pc", + "image_source": "PC_UPLOAD_SEARCH_FILE", + "sdkParams": '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}', + } + res = await self.post( + self.url, _headers=self.headers, _params=params, _data=m + ) # 上传文件 + url = res.json()["data"]["url"] resp = await self.get(url, _headers=self.headers) return BaiDuResponse(resp) diff --git a/PicImageSearch/Async/google.py b/PicImageSearch/Async/google.py index c5bb7e24..d6e06e6a 100644 --- a/PicImageSearch/Async/google.py +++ b/PicImageSearch/Async/google.py @@ -1,10 +1,11 @@ +from urllib.parse import quote + from bs4 import BeautifulSoup from loguru import logger - -from .network import HandOver from PicImageSearch.Utils import GoogleResponse -from urllib.parse import quote + from ..Utils import get_error_message +from .network import HandOver class AsyncGoogle(HandOver): @@ -22,23 +23,22 @@ class AsyncGoogle(HandOver): def __init__(self, **request_kwargs): super().__init__(**request_kwargs) params = dict() - self.url = 'https://www.google.com/searchbyimage' + self.url = "https://www.google.com/searchbyimage" self.params = params self.header = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0', + "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0", } self.requests_kwargs = request_kwargs @staticmethod def _slice(res, index) -> GoogleResponse: - soup = BeautifulSoup(res, 'html.parser') - resp = soup.find_all(class_='g') + soup = BeautifulSoup(res, "html.parser") + resp = soup.find_all(class_="g") pages = soup.find_all("td") return GoogleResponse(resp, pages[1:], index) async def goto_page(self, url, index): - response = await self.get( - url, _headers=self.header) + response = await self.get(url, _headers=self.header) if response.status_code == 200: return self._slice(response.text, index) @@ -60,16 +60,17 @@ async def search(self, url) -> GoogleResponse: """ try: params = self.params - if url[:4] == 'http': - encoded_image_url = quote(url, safe='') - params['image_url'] = encoded_image_url + if url[:4] == "http": + encoded_image_url = quote(url, safe="") + params["image_url"] = encoded_image_url response = await self.get( - self.url, _params=params, _headers=self.header) + self.url, _params=params, _headers=self.header + ) else: - multipart = {'encoded_image': ( - url, open(url, 'rb'))} + multipart = {"encoded_image": (url, open(url, "rb"))} response = await self.post( - f"{self.url}/upload", _files=multipart, _headers=self.header) + f"{self.url}/upload", _files=multipart, _headers=self.header + ) if response.status_code == 200: return self._slice(response.text, 1) else: diff --git a/PicImageSearch/Async/iqdb.py b/PicImageSearch/Async/iqdb.py index 57f36a15..73264536 100644 --- a/PicImageSearch/Async/iqdb.py +++ b/PicImageSearch/Async/iqdb.py @@ -1,7 +1,8 @@ from loguru import logger -from .network import HandOver from PicImageSearch.Utils.iqdb import 
+
 from ..Utils import get_error_message
+from .network import HandOver
 
 
 class AsyncIqdb(HandOver):
@@ -19,8 +20,8 @@ class AsyncIqdb(HandOver):
     def __init__(self, **requests_kwargs):
         super().__init__(**requests_kwargs)
         self.requests_kwargs = requests_kwargs
-        self.url = 'https://www.iqdb.org/'
-        self.url_3d = 'https://3d.iqdb.org/'
+        self.url = "https://www.iqdb.org/"
+        self.url_3d = "https://3d.iqdb.org/"
 
     async def search(self, url) -> IqdbResponse:
         """
@@ -49,14 +50,11 @@
         """
         try:
-            if url[:4] == 'http':  # 网络url
-                datas = {
-                    "url": url
-                }
+            if url[:4] == "http":  # 网络url
+                datas = {"url": url}
                 res = await self.post(self.url, _data=datas)
             else:  # 是否是本地文件
-                res = await self.post(self.url,
-                                      _files={'file': open(url, 'rb')})
+                res = await self.post(self.url, _files={"file": open(url, "rb")})
             if res.status_code == 200:
                 # logger.info(res.text)
                 return IqdbResponse(res.content)
@@ -84,14 +82,11 @@
         • .raw[0].size = First index detail of image size that was found
         """
         try:
-            if url[:4] == 'http':  # 网络url
-                datas = {
-                    "url": url
-                }
+            if url[:4] == "http":  # 网络url
+                datas = {"url": url}
                 res = await self.post(self.url_3d, _data=datas)
             else:  # 是否是本地文件
-                res = await self.post(self.url_3d,
-                                      _files={'file': open(url, 'rb')})
+                res = await self.post(self.url_3d, _files={"file": open(url, "rb")})
             if res.status_code == 200:
                 return IqdbResponse(res.content)
             else:
diff --git a/PicImageSearch/Async/network.py b/PicImageSearch/Async/network.py
index a44472be..2f4ec8e4 100644
--- a/PicImageSearch/Async/network.py
+++ b/PicImageSearch/Async/network.py
@@ -6,7 +6,15 @@ class NetWork:
-    def __init__(self, limit=30, max_connections=100, timeout=20, env=False, internal=False, proxy=None):
+    def __init__(
+        self,
+        limit=30,
+        max_connections=100,
+        timeout=20,
+        env=False,
+        internal=False,
+        proxy=None,
+    ):
         """
 
         :param limit:
@@ -22,8 +30,10 @@ def __init__(self, limit=30, max_connections=100, timeout=20, env=False, interna
             verify=False,
             timeout=httpx.Timeout(timeout, connect=60),
             proxies=self.proxy,
-            limits=httpx.Limits(max_keepalive_connections=limit, max_connections=max_connections),
-            trust_env=env
+            limits=httpx.Limits(
+                max_keepalive_connections=limit, max_connections=max_connections
+            ),
+            trust_env=env,
         )
 
     def start(self):
@@ -55,9 +65,8 @@ async def __aenter__(self):
         return self.session
 
     async def __aexit__(self, exception_type, exception_value, traceback):
-        if isinstance(self.session, NetWork):
-            if self.session.internal:
-                await self.session.close()
+        if isinstance(self.session, NetWork) and self.session.internal:
+            await self.session.close()
 
 
 class HandOver(object):
@@ -73,25 +82,33 @@ async def get(self, _url, _headers=None, _params=None):
             await asyncio.sleep(0)
             return res
 
-    async def post(self, _url, _headers=None, _params=None, _data=None, _json=None, _files=None):
+    async def post(
+        self, _url, _headers=None, _params=None, _data=None, _json=None, _files=None
+    ):
         async with ClientManager(self.session, self.env, self.proxy) as session:
             if _json:
-                res = await session.post(_url, headers=_headers, params=_params, json=_json)
+                res = await session.post(
+                    _url, headers=_headers, params=_params, json=_json
+                )
             elif _files:
-                res = await session.post(_url, headers=_headers, params=_params, files=_files)
+                res = await session.post(
+                    _url, headers=_headers, params=_params, files=_files
+                )
             else:
-                res = await session.post(_url, headers=_headers, params=_params, data=_data)
+                res = await session.post(
+                    _url, headers=_headers, params=_params, data=_data
+                )
             await asyncio.sleep(0)
             return res
 
-    async def downloader(self, url='', path=None, filename=''):  # 下载器
+    async def downloader(self, url="", path=None, filename=""):  # 下载器
         async with ClientManager(self.session, self.env, self.proxy) as session:
             async with session.stream("GET", url=url) as r:
                 if path:
                     file = Path(path).joinpath(filename)
                 else:
                     file = Path().cwd().joinpath(filename)
-                async with aiofiles.open(file, 'wb') as out_file:
+                async with aiofiles.open(file, "wb") as out_file:
                     async for chunk in r.aiter_bytes():
                         await out_file.write(chunk)
                 return file
diff --git a/PicImageSearch/Async/saucenao.py b/PicImageSearch/Async/saucenao.py
index f85b286a..4345a0d8 100644
--- a/PicImageSearch/Async/saucenao.py
+++ b/PicImageSearch/Async/saucenao.py
@@ -1,15 +1,25 @@
 from loguru import logger
-
-from .network import HandOver
 from PicImageSearch.Utils import SauceNAOResponse
 
 from ..Utils import get_error_message
+from .network import HandOver
 
 
 class AsyncSauceNAO(HandOver):
-
-    def __init__(self, api_key: str = None, *, numres: int = 5, hide: int = 1, minsim: int = 30, output_type: int = 2,
-                 testmode: int = 0, dbmask: int = None, dbmaski: int = None, db: int = 999, **requests_kwargs) -> None:
+    def __init__(
+        self,
+        api_key: str = None,
+        *,
+        numres: int = 5,
+        hide: int = 1,
+        minsim: int = 30,
+        output_type: int = 2,
+        testmode: int = 0,
+        dbmask: int = None,
+        dbmaski: int = None,
+        db: int = 999,
+        **requests_kwargs
+    ) -> None:
         """
         SauceNAO
         -----------
@@ -18,7 +28,6 @@ def __init__(self, api_key: str = None, *, numres: int = 5, hide: int = 1, minsi
         Params Keys
         -----------
-
         :param api_key: (str) Access key for SauceNAO (default=None)
         :param output_type:(int) 0=normal (default) html 1=xml api (not implemented) 2=json api default=2
         :param testmode:(int) Test mode 0=normal 1=test (default=0)
@@ -31,22 +40,22 @@ def __init__(self, api_key: str = None, *, numres: int = 5, hide: int = 1, minsi
         """
         # minsim 控制最小相似度
         super().__init__(**requests_kwargs)
-        self.url = 'https://saucenao.com/search.php'
+        self.url = "https://saucenao.com/search.php"
         self.requests_kwargs = requests_kwargs
         params = {
-            'testmode': testmode,
-            'numres': numres,
-            'output_type': output_type,
-            'hide': hide,
-            'db': db,
-            'minsim': minsim
+            "testmode": testmode,
+            "numres": numres,
+            "output_type": output_type,
+            "hide": hide,
+            "db": db,
+            "minsim": minsim,
         }
         if api_key is not None:
-            params['api_key'] = api_key
+            params["api_key"] = api_key
         if dbmask is not None:
-            params['dbmask'] = dbmask
+            params["dbmask"] = dbmask
         if dbmaski is not None:
-            params['dbmaski'] = dbmaski
+            params["dbmaski"] = dbmaski
         self.params = params
 
     async def search(self, url: str) -> SauceNAOResponse:
@@ -82,12 +91,18 @@
             params = self.params
             headers = dict()
             m = None
-            if url[:4] == 'http':  # 网络url
-                params['url'] = url
-                resp = await self.post(self.url, _headers=headers, _data=m, _params=params)
+            if url[:4] == "http":  # 网络url
+                params["url"] = url
+                resp = await self.post(
+                    self.url, _headers=headers, _data=m, _params=params
+                )
             else:  # 文件
-                resp = await self.post(self.url, _headers=headers, _params=params,
-                                       _files={'file': open(url, 'rb')})
+                resp = await self.post(
+                    self.url,
+                    _headers=headers,
+                    _params=params,
+                    _files={"file": open(url, "rb")},
+                )
             if resp.status_code == 200:
                 data = resp.json()
                 return SauceNAOResponse(data)
diff --git a/PicImageSearch/Async/tracemoe.py b/PicImageSearch/Async/tracemoe.py
index acc269e9..4efb0f84 100644
--- a/PicImageSearch/Async/tracemoe.py
+++ b/PicImageSearch/Async/tracemoe.py
@@ -1,48 +1,54 @@
+from pathlib import Path
 from typing import List, Optional
+
 import requests
 from loguru import logger
-from pathlib import Path
-from .network import HandOver
+
 from ..Utils import get_error_message
+from .network import HandOver
 
 
 class TraceMoeAnilist:
     def __init__(self, data):
-        self.id: int = data['id']
+        self.id: int = data["id"]
         """匹配的Anilist ID见https://anilist.co/"""
-        self.idMal: int = data['idMal']
+        self.idMal: int = data["idMal"]
         """匹配的MyAnimelist ID见https://myanimelist.net/"""
-        self.title: dict = data['title']
+        self.title: dict = data["title"]
         """番剧名字"""
-        self.title_native: str = data['title']['native']
+        self.title_native: str = data["title"]["native"]
         """番剧国际命名"""
-        self.title_english: str = data['title']['english']
+        self.title_english: str = data["title"]["english"]
         """番剧英文命名"""
-        self.title_romaji: str = data['title']['romaji']
+        self.title_romaji: str = data["title"]["romaji"]
         """番剧罗马命名"""
-        self.title_chinese: str = 'NULL'
+        self.title_chinese: str = "NULL"
         """番剧中文命名"""
-        self.synonyms: list = data['synonyms']
+        self.synonyms: list = data["synonyms"]
         """备用英文标题"""
-        self.isAdult: bool = data['isAdult']
+        self.isAdult: bool = data["isAdult"]
         """是否R18"""
 
-    def setChinese(self, data):
+    def set_chinese(self, data):
         self.title = data
-        if 'chinese' in data.keys():
-            self.title_chinese: str = data['chinese']  # 番剧中文命名
+        if "chinese" in data.keys():
+            self.title_chinese: str = data["chinese"]  # 番剧中文命名
 
     def __repr__(self):
-        return f'( '
+        return (
+            f"( "
+        )
 
 
 class AsyncTraceMoeNorm(HandOver):
-    def __init__(self, data, chineseTitle=True, mute=False, size=None, **requests_kwargs):
+    def __init__(
+        self, data, chinese_title=True, mute=False, size=None, **requests_kwargs
+    ):
         """
 
         :param data: 数据
-        :param chineseTitle: 中文番剧名称显示
+        :param chinese_title: 中文番剧名称显示
         :param mute: 预览视频静音
         :param size: 视频与图片大小(s/m/l)
         """
@@ -53,13 +59,13 @@ def __init__(self, data, chineseTitle=True, mute=False, size=None, **requests_kw
         """匹配的MyAnimelist ID见https://myanimelist.net/"""
         self.title: dict = {}
         """剧名字"""
-        self.title_native: str = 'NULL'
+        self.title_native: str = "NULL"
         """番剧国际命名"""
-        self.title_english: str = 'NULL'
+        self.title_english: str = "NULL"
         """剧英文命名"""
-        self.title_romaji: str = 'NULL'
+        self.title_romaji: str = "NULL"
         """番剧罗马命名"""
-        self.title_chinese: str = 'NULL'
+        self.title_chinese: str = "NULL"
         """番剧中文命名"""
         self.anilist: Optional[int] = None
         """匹配的Anilist ID见https://anilist.co/"""
@@ -67,38 +73,40 @@ def __init__(self, data, chineseTitle=True, mute=False, size=None, **requests_kw
         """备用英文标题"""
         self.isAdult: bool = False
         """是否R18"""
-        if type(data['anilist']) == dict:
-            self.anilist = data['anilist']['id']  # 匹配的Anilist ID见https://anilist.co/
-            self.idMal: int = data['anilist']['idMal']  # 匹配的MyAnimelist ID见https://myanimelist.net/
-            self.title: dict = data['anilist']['title']  # 番剧名字
-            self.title_native: str = data['anilist']['title']['native']  # 番剧国际命名
-            self.title_english: str = data['anilist']['title']['english']  # 番剧英文命名
-            self.title_romaji: str = data['anilist']['title']['romaji']  # 番剧罗马命名
-            self.synonyms: list = data['anilist']['synonyms']  # 备用英文标题
-            self.isAdult: bool = data['anilist']['isAdult']  # 是否R18
-            if chineseTitle:
-                self.title_chinese: str = self._getChineseTitle()  # 番剧中文命名
+        if type(data["anilist"]) == dict:
+            self.anilist = data["anilist"]["id"]  # 匹配的Anilist ID见https://anilist.co/
+            self.idMal: int = data["anilist"][
+                "idMal"
+            ]  # 匹配的MyAnimelist ID见https://myanimelist.net/
+            self.title: dict = data["anilist"]["title"]  # 番剧名字
+            self.title_native: str = data["anilist"]["title"]["native"]  # 番剧国际命名
+            self.title_english: str = data["anilist"]["title"]["english"]  # 番剧英文命名
+            self.title_romaji: str = data["anilist"]["title"]["romaji"]  # 番剧罗马命名
+            self.synonyms: list = data["anilist"]["synonyms"]  # 备用英文标题
+            self.isAdult: bool = data["anilist"]["isAdult"]  # 是否R18
+            if chinese_title:
+                self.title_chinese: str = self._get_chinese_title()  # 番剧中文命名
         else:
-            self.anilist = data['anilist']  # 匹配的Anilist ID见https://anilist.co/
-        self.filename: str = data['filename']
+            self.anilist = data["anilist"]  # 匹配的Anilist ID见https://anilist.co/
+        self.filename: str = data["filename"]
         """找到匹配项的文件名"""
-        self.episode: int = data['episode']
+        self.episode: int = data["episode"]
         """估计的匹配的番剧的集数"""
-        self.From: int = data['from']
+        self.From: int = data["from"]
         """匹配场景的开始时间"""
-        self.To: int = data['to']
+        self.To: int = data["to"]
         """匹配场景的结束时间"""
-        self.similarity: float = float(data['similarity'])
+        self.similarity: float = float(data["similarity"])
         """相似度,相似性低于 87% 的搜索结果可能是不正确的结果"""
-        self.video: str = data['video']
+        self.video: str = data["video"]
         """预览视频"""
-        self.image: str = data['image']
+        self.image: str = data["image"]
         """预览图像"""
-        if size in ['l', 's', 'm']:  # 大小设置
-            self.video += '&size=' + size
-            self.image += '&size=' + size
+        if size in ["l", "s", "m"]:  # 大小设置
+            self.video += "&size=" + size
+            self.image += "&size=" + size
         if mute:  # 视频静音设置
-            self.video += '&mute'
+            self.video += "&mute"
 
         # ---------------过时版本-----------------------
         # self.anilist_id: int = data['anilist_id']
@@ -134,7 +142,9 @@ def __init__(self, data, chineseTitle=True, mute=False, size=None, **requests_kw
        #     url = url + '&mute'
        #     return url
 
-    async def download_image(self, filename='image.png', path: Path = Path.cwd()) -> Path:
+    async def download_image(
+        self, filename="image.png", path: Path = Path.cwd()
+    ) -> Path:
         """
         下载缩略图
 
@@ -145,7 +155,9 @@
         endpoint = await self.downloader(self.image, path, filename)
         return endpoint
 
-    async def download_video(self, filename='video.mp4', path: Path = Path.cwd()) -> Path:
+    async def download_video(
+        self, filename="video.mp4", path: Path = Path.cwd()
+    ) -> Path:
         """
         下载预览视频
 
@@ -157,17 +169,19 @@
         endpoint = await self.downloader(self.video, path, filename)
         return endpoint
 
-    def _getChineseTitle(self):
-        return self.animeTitle(self.origin['anilist']['id'])['data']['Media']['title']['chinese']
+    def _get_chinese_title(self):
+        return self.get_anime_title(self.origin["anilist"]["id"])["data"]["Media"][
+            "title"
+        ]["chinese"]
 
     @staticmethod
-    def animeTitle(anilistID: int) -> dict:
+    def get_anime_title(anilist_id: int) -> dict:
         """获取中文标题
 
-        :param anilistID: id
+        :param anilist_id: id
         :return: dict
         """
-        query = '''
+        query = """
         query ($id: Int) { # Define which variables will be used in the query (id)
             Media (id: $id, type: ANIME) { # Insert our variables into the query arguments (id) (type: ANIME is hard-coded in the query)
                 id
                 title {
                     native
                     romaji
                    english
                }
            }
        }
-        '''
+        """
 
        # Define our query variables and values that will be used in the query request
-        variables = {
-            'id': anilistID
-        }
+        variables = {"id": anilist_id}
 
-        url = 'https://trace.moe/anilist/'
+        url = "https://trace.moe/anilist/"
 
-        response = requests.post(url, json={'query': query, 'variables': variables})
+        response = requests.post(url, json={"query": query, "variables": variables})
 
         return response.json()
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class TraceMoeResponse:
-    def __init__(self, resp, chineseTitle, mute, size, **requests_kwargs):
+    def __init__(self, resp, chinese_title, mute, size, **requests_kwargs):
         self.requests_kwargs = requests_kwargs
         self.origin: dict = resp
         """原始数据"""
         self.raw: List[AsyncTraceMoeNorm] = list()
         """结果返回值"""
-        resp_docs = resp['result']
+        resp_docs = resp["result"]
         for i in resp_docs:
             self.raw.append(
-                AsyncTraceMoeNorm(i, chineseTitle=chineseTitle, mute=mute, size=size, **self.requests_kwargs))
+                AsyncTraceMoeNorm(
+                    i,
+                    chinese_title=chinese_title,
+                    mute=mute,
+                    size=size,
+                    **self.requests_kwargs,
+                )
+            )
         self.count: int = len(self.raw)
         """搜索结果数量"""
-        self.frameCount: int = resp['frameCount']
+        self.frameCount: int = resp["frameCount"]
         """搜索的帧总数"""
-        self.error: str = resp['error']
+        self.error: str = resp["error"]
         """错误报告"""
         # ---------------过时版本-----------------------
         # self.RawDocsSearchTime: int = resp['RawDocsSearchTime']  # 从数据库检索帧所用的时间
@@ -222,7 +241,7 @@
         # self.quota_ttl: int = resp['quota_ttl']  # 配额重置之前的时间(秒)
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class AsyncTraceMoe(HandOver):
-    TraceMoeURL = 'https://api.trace.moe/search'
-    MeURL = 'https://api.trace.moe/me'
+    TraceMoeURL = "https://api.trace.moe/search"
+    MeURL = "https://api.trace.moe/me"
 
     def __init__(self, mute=False, size=None, **requests_kwargs):
         """主类
@@ -270,7 +289,7 @@ async def me(self, key=None) -> TraceMoeMe:
         try:
             params = None
             if key:
-                params = {'key': key}
+                params = {"key": key}
             res = await self.get(self.MeURL, _params=params, **self.requests_kwargs)
             if res.status_code == 200:
                 data = res.json()
@@ -281,49 +300,62 @@ async def me(self, key=None) -> TraceMoeMe:
             logger.info(e)
 
     @staticmethod
-    def _firstIf(param):
+    def _first_if(param):
         if param != "":
             param += "&"
         return param
 
     @staticmethod
-    def setParams(url, anilistID, anilistInfo, cutBorders):
+    def set_params(url, anilist_id, anilist_info, cut_borders):
         params = {}
-        if anilistInfo:
+        if anilist_info:
             params["anilistInfo"] = True
-        if cutBorders:
+        if cut_borders:
             params["cutBorders"] = True
-        if anilistID:
-            params["anilistID"] = anilistID
+        if anilist_id:
+            params["anilistID"] = anilist_id
         if url:
             params["url"] = url
         return params
 
-    async def search(self, url, key=None, anilistID=None, chineseTitle=True,
-                     anilistInfo=True, cutBorders=True) -> TraceMoeResponse:
+    async def search(
+        self,
+        url,
+        key=None,
+        anilist_id=None,
+        chinese_title=True,
+        anilist_info=True,
+        cut_borders=True,
+    ) -> TraceMoeResponse:
         """识别图片
         :param key: API密钥 https://soruly.github.io/trace.moe-api/#/limits?id=api-search-quota-and-limits
         :param url: 网络地址(http或https链接)或本地(本地图片路径) When using video / gif, only the 1st frame would be extracted for searching.
-        :param anilistID: 搜索限制为特定的 Anilist ID(默认无)
-        :param anilistInfo: 详细信息(默认开启)
-        :param chineseTitle: 中文番剧标题
-        :param cutBorders: 切割黑边框(默认开启)
+        :param anilist_id: 搜索限制为特定的 Anilist ID(默认无)
+        :param anilist_info: 详细信息(默认开启)
+        :param chinese_title: 中文番剧标题
+        :param cut_borders: 切割黑边框(默认开启)
         """
         try:
             headers = None
             if headers:
                 headers = {"x-trace-key": key}
-            if url[:4] == 'http':  # 网络url
-                params = self.setParams(url, anilistID, anilistInfo, cutBorders)
                 res = await self.get(self.TraceMoeURL, _headers=headers, _params=params)
             else:  # 是否是本地文件
-                params = self.setParams(None, anilistID, anilistInfo, cutBorders)
-                res = await self.post(self.TraceMoeURL, _headers=headers, _params=params,
-                                      _files={"image": open(url, "rb")})
+            if url[:4] == "http":  # 网络url
+                params = self.set_params(url, anilist_id, anilist_info, cut_borders)
                 res = await self.get(self.TraceMoeURL, _headers=headers, _params=params)
             else:  # 是否是本地文件
+                params = self.set_params(None, anilist_id, anilist_info, cut_borders)
+                res = await self.post(
+                    self.TraceMoeURL,
+                    _headers=headers,
+                    _params=params,
+                    _files={"image": open(url, "rb")},
+                )
             if res.status_code == 200:
                 data = res.json()
-                return TraceMoeResponse(data, chineseTitle, self.mute, self.size, **self.requests_kwargs)
+                return TraceMoeResponse(
+                    data, chinese_title, self.mute, self.size, **self.requests_kwargs
+                )
             else:
                 logger.error(get_error_message(res.status_code))
         except Exception as e:
diff --git a/PicImageSearch/Utils/__init__.py b/PicImageSearch/Utils/__init__.py
index 5f6b7273..4604338b 100644
--- a/PicImageSearch/Utils/__init__.py
+++ b/PicImageSearch/Utils/__init__.py
@@ -1,6 +1,6 @@
-from .saucenao import SauceNAOResponse, SauceNAONorm
-from .ascii2d import Ascii2DResponse, Ascii2DNorm
-from .iqdb import IqdbResponse, IqdbNorm
-from .google import GoogleResponse, GoogleNorm
-from .baidu import BaiDuResponse, BaiDuNorm
+from .ascii2d import Ascii2DNorm, Ascii2DResponse
+from .baidu import BaiDuNorm, BaiDuResponse
 from .errors import get_error_message
+from .google import GoogleNorm, GoogleResponse
+from .iqdb import IqdbNorm, IqdbResponse
+from .saucenao import SauceNAONorm, SauceNAOResponse
diff --git a/PicImageSearch/Utils/ascii2d.py b/PicImageSearch/Utils/ascii2d.py
index 70b1f5f6..bdb1ba70 100644
--- a/PicImageSearch/Utils/ascii2d.py
+++ b/PicImageSearch/Utils/ascii2d.py
@@ -2,7 +2,7 @@ class Ascii2DNorm:
-    URL = 'https://ascii2d.net'
+    ascii2d_url = "https://ascii2d.net"
 
     def __init__(self, data):
         self.thumbnail: str = ""
@@ -20,44 +20,43 @@ def __init__(self, data):
         self._arrange(data)
 
     def _arrange(self, data):
-        o_url = data[3].find('div', class_="detail-box gray-link").contents
-        urls = self._geturls(o_url)
-        self.thumbnail = self.URL + data[1].find('img')['src']
-        self.url = urls['url']
-        self.title = urls['title']
-        self.authors = urls['authors']
-        self.marks = urls['mark']
+        o_url = data[3].find("div", class_="detail-box gray-link").contents
+        urls = self._get_urls(o_url)
+        self.thumbnail = self.ascii2d_url + data[1].find("img")["src"]
+        self.url = urls["url"]
+        self.title = urls["title"]
+        self.authors = urls["authors"]
+        self.marks = urls["mark"]
 
     @staticmethod
-    def _geturls(data):
+    def _get_urls(data):
         all_urls = {
-            'url': "",
-            'title': "",
-            'authors_urls': "",
-            'authors': "",
-            'mark': ""
+            "url": "",
+            "title": "",
+            "authors_urls": "",
+            "authors": "",
+            "mark": "",
         }
 
         for x in data:
-            if x == '\n':
+            if x == "\n":
                 continue
             try:
-                origin = x.find_all('a')
-                all_urls['url'] = origin[0]['href']
-                all_urls['title'] = origin[0].string
-                all_urls['authors_urls'] = origin[1]['href']
-                all_urls['authors'] = origin[1].string
-                all_urls['mark'] = x.small.string
+                origin = x.find_all("a")
+                all_urls["url"] = origin[0]["href"]
+                all_urls["title"] = origin[0].string
+                all_urls["authors_urls"] = origin[1]["href"]
+                all_urls["authors"] = origin[1].string
+                all_urls["mark"] = x.small.string
             except:
                 pass
         return all_urls
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class Ascii2DResponse:
-
     def __init__(self, resp):
         self.origin: list = resp
         """原始返回值"""
@@ -68,4 +67,4 @@ def __init__(self, resp):
             self.raw.append(Ascii2DNorm(detail))
 
     def __repr__(self):
-        return f''
+        return f""
diff --git a/PicImageSearch/Utils/baidu.py b/PicImageSearch/Utils/baidu.py
index 49fdcf36..c8ed3e22 100644
--- a/PicImageSearch/Utils/baidu.py
+++ b/PicImageSearch/Utils/baidu.py
@@ -7,23 +7,23 @@ class BaiDuNorm:
     def __init__(self, data):
         self.origin: dict = data
         """原始返回值"""
-        self.page_title: str = data['fromPageTitle']
+        self.page_title: str = data["fromPageTitle"]
         """页面标题"""
-        self.title: str = data['title'][0]
+        self.title: str = data["title"][0]
         """标题"""
-        self.abstract: str = data['abstract']
+        self.abstract: str = data["abstract"]
         """说明文字"""
-        self.image_src: str = data['image_src']
+        self.image_src: str = data["image_src"]
         """图片地址"""
-        self.url: str = data['url']
+        self.url: str = data["url"]
         """图片所在网页地址"""
         self.img: list = list()
         """其他图片地址列表"""
-        if 'imgList' in data:
-            self.img: list = data['imgList']
+        if "imgList" in data:
+            self.img: list = data["imgList"]
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class BaiDuResponse:
@@ -34,24 +34,32 @@ def __init__(self, resp):
         """相似结果返回值"""
         self.raw: Optional[List[BaiDuNorm]] = list()
         """来源结果返回值"""
-        self.origin: list = json.loads(re.search(pattern='cardData = (.+);window\.commonData', string=resp.text)[1])
+        self.origin: list = json.loads(
+            re.search(pattern=r"cardData = (.+);window\.commonData", string=resp.text)[
+                1
+            ]
+        )
         """原始返回值"""
         for i in self.origin:
-            setattr(self, i['cardName'], i)
-        if hasattr(self, 'same'):
-            self.raw = [BaiDuNorm(x) for x in self.same['tplData']['list']]
-            info = self.same['extData']['showInfo']
+            setattr(self, i["cardName"], i)
+        if hasattr(self, "same"):
+            self.raw = [BaiDuNorm(x) for x in self.same["tplData"]["list"]]
+            info = self.same["extData"]["showInfo"]
             for y in info:
-                if y == 'other_info':
+                if y == "other_info":
                     continue
                 for z in info[y]:
                     try:
                         self.similar[info[y].index(z)][y] = z
                     except:
                         self.similar.append({y: z})
-        self.item = [attr for attr in dir(self) if
-                     not callable(getattr(self, attr)) and not attr.startswith(("__", 'origin', 'raw', 'same', 'url'))]
+        self.item = [
+            attr
+            for attr in dir(self)
+            if not callable(getattr(self, attr))
+            and not attr.startswith(("__", "origin", "raw", "same", "url"))
+        ]
         """获取所有卡片名"""
 
     def __repr__(self):
-        return f''
+        return f""
diff --git a/PicImageSearch/Utils/errors.py b/PicImageSearch/Utils/errors.py
index ff464cad..93be1df0 100644
--- a/PicImageSearch/Utils/errors.py
+++ b/PicImageSearch/Utils/errors.py
@@ -8,7 +8,7 @@ def get_error_message(code: int) -> str:
     elif code == 400:
         return "Did you have upload the image ?, or wrong request syntax"
     elif code == 403:
-        return "Forbidden,or token unvalid"
+        return "Forbidden,or token invalid"
     elif code == 429:
         return "Too many request"
     elif code == 500 or code == 503:
diff --git a/PicImageSearch/Utils/google.py b/PicImageSearch/Utils/google.py
index 5e943019..57e4b4cf 100644
--- a/PicImageSearch/Utils/google.py
+++ b/PicImageSearch/Utils/google.py
@@ -13,57 +13,57 @@ def __init__(self, data):
         self._arrange(data)
 
     def _arrange(self, data):
-        get_data = self._getdata(data)
-        self.title = get_data['title']
-        self.url = get_data['url']
-        self.thumbnail = get_data['thumbnail']
+        get_data = self._get_data(data)
+        self.title = get_data["title"]
+        self.url = get_data["url"]
+        self.thumbnail = get_data["thumbnail"]
 
-    def _getdata(self, datas):
+    def _get_data(self, datas):
         data = {
-            'thumbnail': "",
-            'title': "",
-            'url': "",
+            "thumbnail": "",
+            "title": "",
+            "url": "",
         }
 
         for x in datas:
             try:
-                origin = x.find_all('h3')
-                data['title'] = origin[0].string
-                url = x.find_all('a')
-                data['url'] = url[0]['href']
-                img = self._gethumbnail(url)
-                data['thumbnail'] = img
+                origin = x.find_all("h3")
+                data["title"] = origin[0].string
+                url = x.find_all("a")
+                data["url"] = url[0]["href"]
+                img = self._get_thumbnail(url)
+                data["thumbnail"] = img
             except:
                 pass
         return data
 
     @staticmethod
-    def _gethumbnail(data):
-        GOOGLEURL = "https://www.google.com/"
+    def _get_thumbnail(data):
+        google_url = "https://www.google.com/"
         regex = re.compile(
-            r"((http(s)?(\:\/\/))+(www\.)?([\w\-\.\/])*(\.[a-zA-Z]{2,3}\/?))[^\s\b\n|]*[^.,;:\?\!\@\^\$ -]")
+            r"((http(s)?(://))+(www\.)?([\w\-./])*(\.[a-zA-Z]{2,3}/?))[^\s\b\n|]*[^.,;:?!@^$ -]"
+        )
 
-        thumbnail = "No directable url"
+        thumbnail = "No detectable url"
         for a in range(5):
             try:
-                if re.findall('jpg|png', regex.search(data[a]['href']).group(1)):
-                    thumbnail = regex.search(data[a]['href']).group(1)
-                elif re.findall('/imgres', data[a]['href']):
-                    thumbnail = f"{GOOGLEURL}{data[a]['href']}"
+                if re.findall("jpg|png", regex.search(data[a]["href"]).group(1)):
+                    thumbnail = regex.search(data[a]["href"]).group(1)
+                elif re.findall("/imgres", data[a]["href"]):
+                    thumbnail = f"{google_url}{data[a]['href']}"
             except:
                 continue
 
         return thumbnail
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class GoogleResponse:
-
     def __init__(self, resp, pages, index):
         self.origin: list = resp
         """原始返回值"""
@@ -82,9 +82,9 @@ def __init__(self, resp, pages, index):
 
     def get_page_url(self, index):
         if self.index != index:
-            url = "https://www.google.com" + self.pages[index - 1].a['href']
+            url = "https://www.google.com" + self.pages[index - 1].a["href"]
             print(url)
             return url
 
     def __repr__(self):
-        return f''
+        return f""
diff --git a/PicImageSearch/Utils/iqdb.py b/PicImageSearch/Utils/iqdb.py
index 8f583ddd..8d3a983d 100644
--- a/PicImageSearch/Utils/iqdb.py
+++ b/PicImageSearch/Utils/iqdb.py
@@ -1,13 +1,12 @@
 import re
-from typing import Optional, Dict, List
+from typing import List
 
 from bs4 import BeautifulSoup
-from bs4.element import Tag, NavigableString
-from loguru import logger
+from bs4.element import NavigableString, Tag
 
 
 class IqdbNorm:
-    _URL = 'http://www.iqdb.org'
+    iqdb_url = "https://www.iqdb.org"
 
     def __init__(self, data: Tag, isnot_more: bool = True):
         self.isnot_more: bool = isnot_more
@@ -29,7 +28,7 @@ def __init__(self, data: Tag, isnot_more: bool = True):
         self._arrange(data.table)
 
     def _arrange(self, data: Tag):
-        REGEXIQ = re.compile("[0-9]+")
+        regex_iq = re.compile("[0-9]+")
         if self.isnot_more:
             self.content = data.tr.th.string
             if self.content == "No relevant matches":
@@ -38,24 +37,32 @@ def _arrange(self, data: Tag):
             tbody = data.tr.next_sibling
         else:
             tbody = data.tr
-        self.url = tbody.td.a['href'] if tbody.td.a['href'][:4] == "http" else "https:" + tbody.td.a['href']
-        self.thumbnail = self._URL + tbody.td.a.img['src']
+        self.url = (
+            tbody.td.a["href"]
+            if tbody.td.a["href"][:4] == "http"
+            else "https:" + tbody.td.a["href"]
+        )
+        self.thumbnail = self.iqdb_url + tbody.td.a.img["src"]
         tbody = tbody.next_sibling
         source = [stt for stt in tbody.td.strings]
         if len(source) > 1:
-            self.other_source.append({"source": source[1],
-                                      "url": tbody.td.a['href'] if tbody.td.a['href'][:4] == "http" else "https:" +
-                                      tbody.td.a[
-                                          'href']})
+            self.other_source.append(
+                {
+                    "source": source[1],
+                    "url": tbody.td.a["href"]
+                    if tbody.td.a["href"][:4] == "http"
+                    else "https:" + tbody.td.a["href"],
+                }
+            )
         tbody = tbody.next_sibling
         self.size = tbody.td.string
-        similarity_raw = REGEXIQ.search(tbody.next_sibling.td.string)
+        similarity_raw = regex_iq.search(tbody.next_sibling.td.string)
         if similarity_raw:
             self.similarity = similarity_raw.group(0) + "%"
         self.source = source[0]
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class IqdbResponse:
@@ -82,7 +89,7 @@ def _slice(self, data: bytes) -> None:
         pages = soup.find(attrs={"class": "pages"})
 
         for i in pages:
-            if i == '\n' or str(i) == '' or 'Your image' in str(i):
+            if i == "\n" or str(i) == "" or "Your image" in str(i):
" or "Your image" in str(i): continue # logger.info(i) self.raw.append(IqdbNorm(i)) @@ -110,4 +117,4 @@ def _get_show(self, data: Tag) -> None: self.tineye = "https:" + j["href"] def __repr__(self): - return f'' + return f"" diff --git a/PicImageSearch/Utils/saucenao.py b/PicImageSearch/Utils/saucenao.py index c54913e2..d5efcb02 100644 --- a/PicImageSearch/Utils/saucenao.py +++ b/PicImageSearch/Utils/saucenao.py @@ -5,18 +5,18 @@ class SauceNAONorm: def __init__(self, data: dict): - result_header = data['header'] - result_data = data['data'] + result_header = data["header"] + result_data = data["data"] self.raw: dict = data self.origin: dict = data """原始值""" - self.similarity: float = float(result_header['similarity']) + self.similarity: float = float(result_header["similarity"]) """相似度""" - self.thumbnail: str = result_header['thumbnail'] + self.thumbnail: str = result_header["thumbnail"] """缩略图地址""" - self.index_id: int = result_header['index_id'] + self.index_id: int = result_header["index_id"] """文件id""" - self.index_name: str = result_header['index_name'] + self.index_name: str = result_header["index_name"] """文件名称""" self.title: str = self._get_title(result_data) """标题""" @@ -29,95 +29,97 @@ def __init__(self, data: dict): self.member_id: str = self._get_member_id(result_data) """pixiv的画师id(如果有)""" - def download_thumbnail(self, filename='thumbnail.png'): # 缩略图生成 + def download_thumbnail(self, filename="thumbnail.png"): # 缩略图生成 with requests.get(self.thumbnail, stream=True) as resp: - with open(filename, 'wb') as fd: + with open(filename, "wb") as fd: for chunk in resp.iter_content(): fd.write(chunk) @staticmethod def _get_title(data): - if 'title' in data: - return data['title'] - elif 'eng_name' in data: - return data['eng_name'] - elif 'material' in data: - return data['material'] - elif 'source' in data: - return data['source'] - elif 'created_at' in data: - return data['created_at'] + if "title" in data: + return data["title"] + elif "eng_name" in data: + return data["eng_name"] + elif "material" in data: + return data["material"] + elif "source" in data: + return data["source"] + elif "created_at" in data: + return data["created_at"] @staticmethod def _get_url(data): - if 'ext_urls' in data: - return data['ext_urls'][0] - elif 'getchu_id' in data: - return f'http://www.getchu.com/soft.phtml?id={data["getchu_id"]}' - return '' + if "ext_urls" in data: + return data["ext_urls"][0] + elif "getchu_id" in data: + return f'https://www.getchu.com/soft.phtml?id={data["getchu_id"]}' + return "" @staticmethod def _get_author(data): - if 'author' in data: - return data['author'] - elif 'author_name' in data: - return data['author_name'] - elif 'member_name' in data: - return data['member_name'] - elif 'pawoo_user_username' in data: - return data['pawoo_user_username'] - elif 'company' in data: - return data['company'] - elif 'creator' in data: - if isinstance(data['creator'], list): - return data['creator'][0] - return data['creator'] + if "author" in data: + return data["author"] + elif "author_name" in data: + return data["author_name"] + elif "member_name" in data: + return data["member_name"] + elif "pawoo_user_username" in data: + return data["pawoo_user_username"] + elif "company" in data: + return data["company"] + elif "creator" in data: + if isinstance(data["creator"], list): + return data["creator"][0] + return data["creator"] @staticmethod def _get_pixiv_id(data): - if 'pixiv_id' in data: - return data['pixiv_id'] + if "pixiv_id" in data: + return data["pixiv_id"] else: - 
+            return ""
 
     @staticmethod
     def _get_member_id(data):
-        if 'member_id' in data:
-            return data['member_id']
+        if "member_id" in data:
+            return data["member_id"]
         else:
-            return ''
+            return ""
 
     def __repr__(self):
-        return f''
+        return f""
 
 
 class SauceNAOResponse:
     def __init__(self, resp: dict):
-        resp_header = resp['header']
-        resp_results = resp['results']
+        resp_header = resp["header"]
+        resp_results = resp["results"]
         self.raw: List[SauceNAONorm] = [SauceNAONorm(i) for i in resp_results]
         """所有的返回结果"""
         self.origin: dict = resp
         """原始返回结果"""
-        self.short_remaining: int = resp_header['short_remaining']  # 每30秒访问额度
+        self.short_remaining: int = resp_header["short_remaining"]  # 每30秒访问额度
         """每30秒访问额度"""
-        self.long_remaining: int = resp_header['long_remaining']  # 每天访问额度
+        self.long_remaining: int = resp_header["long_remaining"]  # 每天访问额度
         """每天访问额度"""
-        self.user_id: int = resp_header['user_id']
-        self.account_type: int = resp_header['account_type']
-        self.short_limit: str = resp_header['short_limit']
-        self.long_limit: str = resp_header['long_limit']
-        self.status: int = resp_header['status']
+        self.user_id: int = resp_header["user_id"]
+        self.account_type: int = resp_header["account_type"]
+        self.short_limit: str = resp_header["short_limit"]
+        self.long_limit: str = resp_header["long_limit"]
+        self.status: int = resp_header["status"]
         """返回http状态值"""
-        self.results_requested: int = resp_header['results_requested']
+        self.results_requested: int = resp_header["results_requested"]
         """数据返回值数量"""
-        self.search_depth: str = resp_header['search_depth']
+        self.search_depth: str = resp_header["search_depth"]
         """搜索所涉及的数据库数量"""
-        self.minimum_similarity: float = resp_header['minimum_similarity']
+        self.minimum_similarity: float = resp_header["minimum_similarity"]
         """最小相似度"""
-        self.results_returned: int = resp_header['results_returned']
+        self.results_returned: int = resp_header["results_returned"]
         """数据返回值数量"""
 
     def __repr__(self):
-        return (f'')
+        return (
+            f""
+        )
diff --git a/PicImageSearch/__init__.py b/PicImageSearch/__init__.py
index db7c2ef2..93db3adf 100644
--- a/PicImageSearch/__init__.py
+++ b/PicImageSearch/__init__.py
@@ -1,12 +1,18 @@
-from .saucenao import SauceNAO
-from .tracemoe import TraceMoe, TraceMoeNorm, TraceMoeResponse, TraceMoeMe, TraceMoeAnilist
 from .ascii2d import Ascii2D
-from .iqdb import Iqdb
-from .google import Google
-from .baidu import BaiDu
 from .Async import *
+from .baidu import BaiDu
+from .google import Google
+from .iqdb import Iqdb
+from .saucenao import SauceNAO
+from .tracemoe import (
+    TraceMoe,
+    TraceMoeAnilist,
+    TraceMoeMe,
+    TraceMoeNorm,
+    TraceMoeResponse,
+)
 
-__author__ = 'kitUIN'
-__license__ = 'MIT License'
-__contributors__ = ['kitUIN', 'lleans',"chinoll","NekoAria"]
-__email__ = 'kulujun@gmail.com'
+__author__ = "kitUIN"
+__license__ = "MIT License"
+__contributors__ = ["kitUIN", "lleans", "chinoll", "NekoAria"]
+__email__ = "kulujun@gmail.com"
diff --git a/PicImageSearch/ascii2d.py b/PicImageSearch/ascii2d.py
index b921cd47..2cc35c84 100644
--- a/PicImageSearch/ascii2d.py
+++ b/PicImageSearch/ascii2d.py
@@ -2,6 +2,7 @@
 from bs4 import BeautifulSoup
 from loguru import logger
 from requests_toolbelt import MultipartEncoder
+
 from .Utils import Ascii2DResponse, get_error_message
 
 
@@ -24,8 +25,8 @@ def __init__(self, bovw=False, **requests_kwargs):
 
     @staticmethod
     def _slice(res) -> Ascii2DResponse:
-        soup = BeautifulSoup(res, 'html.parser')
-        resp = soup.find_all(class_='row item-box')
+        soup = BeautifulSoup(res, "html.parser")
+        resp = soup.find_all(class_="row item-box")
         return Ascii2DResponse(resp)
 
     def search(self, url) -> Ascii2DResponse:
@@ -46,31 +47,34 @@ def search(self, url) -> Ascii2DResponse:
         • .raw[0].thumbnail = First index of url image that was found\n
         • .raw[0].detail = First index of details image that was found
         """
-        scraper = cloudscraper.create_scraper({'browser': 'chrome',
-                                               'platform': 'windows',
-                                               'mobile': False
-                                               })
+        scraper = cloudscraper.create_scraper(
+            {"browser": "chrome", "platform": "windows", "mobile": False}
+        )
         try:
-            if url[:4] == 'http':  # 网络url
-                ascii2d_url = 'https://ascii2d.net/search/uri'
-                m = MultipartEncoder(
-                    fields={
-                        'uri': url
-                    }
-                )
+            if url[:4] == "http":  # 网络url
+                ascii2d_url = "https://ascii2d.net/search/uri"
+                m = MultipartEncoder(fields={"uri": url})
             else:  # 是否是本地文件
-                ascii2d_url = 'https://ascii2d.net/search/file'
+                ascii2d_url = "https://ascii2d.net/search/file"
                 m = MultipartEncoder(
                     fields={
-                        'file': ('filename', open(url, 'rb'), "type=multipart/form-data")
+                        "file": (
+                            "filename",
+                            open(url, "rb"),
+                            "type=multipart/form-data",
+                        )
                     }
                 )
-            headers = {'Content-Type': m.content_type}
-            res = scraper.post(ascii2d_url, headers=headers, data=m, **self.requests_kwargs)
+            headers = {"Content-Type": m.content_type}
+            res = scraper.post(
+                ascii2d_url, headers=headers, data=m, **self.requests_kwargs
+            )
             if res.status_code == 200:
                 if self.bovw:  # 如果启用bovw选项,第一次请求是向服务器提交文件
-                    res = scraper.get(res.url.replace('/color/', '/bovw/'), **self.requests_kwargs)
+                    res = scraper.get(
+                        res.url.replace("/color/", "/bovw/"), **self.requests_kwargs
+                    )
             else:
                 logger.error(res.status_code)
                 logger.error(get_error_message(res.status_code))
diff --git a/PicImageSearch/baidu.py b/PicImageSearch/baidu.py
index 00856eea..713cede9 100644
--- a/PicImageSearch/baidu.py
+++ b/PicImageSearch/baidu.py
@@ -1,47 +1,58 @@
 import time
+
 import requests
-from requests_toolbelt import MultipartEncoder
 from PicImageSearch.Utils import BaiDuResponse
+from requests_toolbelt import MultipartEncoder
 
 
 class BaiDu:
-
     def __init__(self, **requests_kwargs):
-        self.url = 'https://graph.baidu.com/upload'
+        self.url = "https://graph.baidu.com/upload"
         self.requests_kwargs = requests_kwargs
         self.headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45'
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45"
         }
 
     def search(self, url: str) -> BaiDuResponse:
-        params = {
-            'uptime': int(time.time())
-        }
-        if url[:4] == 'http':  # 网络url
-            m = {'image': url,
-                 'range': '{"page_from": "searchIndex"}',
-                 'from': "pc",
-                 'tn': 'pc',
-                 'image_source': 'PC_UPLOAD_MOVE',
-                 'sdkParams': '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}'
-                 }
+        params = {"uptime": int(time.time())}
+        if url[:4] == "http":  # 网络url
+            m = {
+                "image": url,
+                "range": '{"page_from": "searchIndex"}',
+                "from": "pc",
+                "tn": "pc",
+                "image_source": "PC_UPLOAD_MOVE",
+                "sdkParams": '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}',
+            }
             headers = self.headers
         else:  # 文件
-            m = MultipartEncoder(fields={
-                'image': ('filename', open(url, 'rb'), "type=multipart/form-data"),
"type=multipart/form-data"), - 'range': '{"page_from": "searchIndex"}', - 'from': "pc", - 'tn': 'pc', - 'image_source': 'PC_UPLOAD_SEARCH_FILE', - 'sdkParams': '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}' - }) - headers = {'Content-Type': m.content_type, - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45'} - res = requests.post(self.url, headers=headers, params=params, data=m, verify=False, - **self.requests_kwargs) # 上传文件 + m = MultipartEncoder( + fields={ + "image": ("filename", open(url, "rb"), "type=multipart/form-data"), + "range": '{"page_from": "searchIndex"}', + "from": "pc", + "tn": "pc", + "image_source": "PC_UPLOAD_SEARCH_FILE", + "sdkParams": '{"data":"a4388c3ef696d354e7f05402e1d38daf48bfb4f3d5bd941e2d0c920dc3b387065b7c85440986897b1f56ef6d352e3b94b3ea435ba5e1bb5a86c5feb88e2e9e1179abd5b8699370b6be8e7cfb96e6e605","key_id":"23","sign":"f22953e8"}', + } + ) + headers = { + "Content-Type": m.content_type, + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45", + } + res = requests.post( + self.url, + headers=headers, + params=params, + data=m, + verify=False, + **self.requests_kwargs + ) # 上传文件 - url = res.json()['data']['url'] + url = res.json()["data"]["url"] print(url) - resp = requests.get(url, headers=self.headers, verify=False, **self.requests_kwargs) + resp = requests.get( + url, headers=self.headers, verify=False, **self.requests_kwargs + ) print(resp.text) return BaiDuResponse(resp) diff --git a/PicImageSearch/google.py b/PicImageSearch/google.py index 4e61be9a..d8b2c511 100644 --- a/PicImageSearch/google.py +++ b/PicImageSearch/google.py @@ -1,8 +1,10 @@ +from urllib.parse import quote + import requests from bs4 import BeautifulSoup from loguru import logger -from urllib.parse import quote from PicImageSearch.Utils import GoogleResponse + from .Utils import get_error_message @@ -20,17 +22,17 @@ class Google: def __init__(self, **request_kwargs): params = dict() - self.url = 'https://www.google.com/searchbyimage' + self.url = "https://www.google.com/searchbyimage" self.params = params self.header = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0', + "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0", } self.requests_kwargs = request_kwargs @staticmethod def _slice(res, index): - soup = BeautifulSoup(res, 'html.parser') - resp = soup.find_all(class_='g') + soup = BeautifulSoup(res, "html.parser") + resp = soup.find_all(class_="g") pages = soup.find_all("td") return GoogleResponse(resp, pages[1:], index) @@ -57,16 +59,20 @@ def search(self, url) -> GoogleResponse: """ try: params = self.params - if url[:4] == 'http': - encoded_image_url = quote(url, safe='') - params['image_url'] = encoded_image_url + if url[:4] == "http": + encoded_image_url = quote(url, safe="") + params["image_url"] = encoded_image_url response = requests.get( - self.url, params=params, headers=self.header, **self.requests_kwargs) + self.url, params=params, headers=self.header, **self.requests_kwargs + ) else: - multipart = {'encoded_image': ( - url, open(url, 'rb'))} + multipart = {"encoded_image": (url, open(url, "rb"))} response = requests.post( - 
f"{self.url}/upload", files=multipart, headers=self.header, **self.requests_kwargs) + f"{self.url}/upload", + files=multipart, + headers=self.header, + **self.requests_kwargs, + ) if response.status_code == 200: return self._slice(response.text, 1) else: diff --git a/PicImageSearch/iqdb.py b/PicImageSearch/iqdb.py index eb6b0f92..9a61f7b2 100644 --- a/PicImageSearch/iqdb.py +++ b/PicImageSearch/iqdb.py @@ -2,8 +2,8 @@ from loguru import logger from requests_toolbelt import MultipartEncoder -from .Utils.iqdb import IqdbResponse from .Utils import get_error_message +from .Utils.iqdb import IqdbResponse class Iqdb: @@ -19,8 +19,8 @@ class Iqdb: """ def __init__(self, **requests_kwargs): - self.url = 'https://www.iqdb.org/' - self.url_3d = 'https://3d.iqdb.org/' + self.url = "https://www.iqdb.org/" + self.url_3d = "https://3d.iqdb.org/" self.requests_kwargs = requests_kwargs def search(self, url) -> IqdbResponse: @@ -49,18 +49,20 @@ def search(self, url) -> IqdbResponse: """ try: - if url[:4] == 'http': # 网络url - datas = { - "url": url - } + if url[:4] == "http": # 网络url + datas = {"url": url} res = requests.post(self.url, data=datas, **self.requests_kwargs) else: # 是否是本地文件 m = MultipartEncoder( fields={ - 'file': ('filename', open(url, 'rb'), "type=multipart/form-data") + "file": ( + "filename", + open(url, "rb"), + "type=multipart/form-data", + ) } ) - headers = {'Content-Type': m.content_type} + headers = {"Content-Type": m.content_type} res = requests.post(self.url, headers=headers, **self.requests_kwargs) if res.status_code == 200: # logger.info(res.text) @@ -75,7 +77,7 @@ def search_3d(self, url) -> IqdbResponse: Iqdb 3D ----------- Reverse image from https://3d.iqdb.org\n - + Return Attributes ----------- @@ -89,19 +91,23 @@ def search_3d(self, url) -> IqdbResponse: • .raw[0].size = First index detail of image size that was found """ try: - if url[:4] == 'http': # 网络url - datas = { - "url": url - } + if url[:4] == "http": # 网络url + datas = {"url": url} res = requests.post(self.url_3d, data=datas, **self.requests_kwargs) else: # 是否是本地文件 m = MultipartEncoder( fields={ - 'file': ('filename', open(url, 'rb'), "type=multipart/form-data") + "file": ( + "filename", + open(url, "rb"), + "type=multipart/form-data", + ) } ) - headers = {'Content-Type': m.content_type} - res = requests.post(self.url_3d, headers=headers, **self.requests_kwargs) + headers = {"Content-Type": m.content_type} + res = requests.post( + self.url_3d, headers=headers, **self.requests_kwargs + ) if res.status_code == 200: return IqdbResponse(res.content) else: diff --git a/PicImageSearch/saucenao.py b/PicImageSearch/saucenao.py index 460df95c..acd4d23e 100644 --- a/PicImageSearch/saucenao.py +++ b/PicImageSearch/saucenao.py @@ -1,24 +1,25 @@ import requests from loguru import logger from requests_toolbelt import MultipartEncoder + from .Utils import SauceNAOResponse, get_error_message class SauceNAO: - - def __init__(self, - api_key: str = None, - *, - numres: int = 5, - hide: int = 1, - minsim: int = 30, - output_type: int = 2, - testmode: int = 0, - dbmask: int = None, - dbmaski: int = None, - db: int = 999, - **requests_kwargs - ) -> None: + def __init__( + self, + api_key: str = None, + *, + numres: int = 5, + hide: int = 1, + minsim: int = 30, + output_type: int = 2, + testmode: int = 0, + dbmask: int = None, + dbmaski: int = None, + db: int = 999, + **requests_kwargs + ) -> None: """ SauceNAO ----------- @@ -38,22 +39,22 @@ def __init__(self, :param hide:(int) result hiding control, none=0, clear return value 
(default)=1, suspect return value=2, all return value=3 """ # minsim 控制最小相似度 - self.url = 'https://saucenao.com/search.php' + self.url = "https://saucenao.com/search.php" self.requests_kwargs = requests_kwargs params = { - 'testmode': testmode, - 'numres': numres, - 'output_type': output_type, - 'hide': hide, - 'db': db, - 'minsim': minsim + "testmode": testmode, + "numres": numres, + "output_type": output_type, + "hide": hide, + "db": db, + "minsim": minsim, } if api_key is not None: - params['api_key'] = api_key + params["api_key"] = api_key if dbmask is not None: - params['dbmask'] = dbmask + params["dbmask"] = dbmask if dbmaski is not None: - params['dbmaski'] = dbmaski + params["dbmaski"] = dbmaski self.params = params def search(self, url: str) -> SauceNAOResponse: @@ -82,20 +83,34 @@ def search(self, url: str) -> SauceNAOResponse: Params Keys ----------- :param url: network address or local - + further documentation visit https://saucenao.com/user.php?page=search-api """ try: params = self.params headers = {} m = None - if url[:4] == 'http': # 网络url - params['url'] = url + if url[:4] == "http": # 网络url + params["url"] = url else: # 文件 - m = MultipartEncoder(fields={'file': ('filename', open(url, 'rb'), "type=multipart/form-data")}) - headers = {'Content-Type': m.content_type} - resp = requests.post(self.url, headers=headers, data=m, params=params, verify=False, - **self.requests_kwargs) + m = MultipartEncoder( + fields={ + "file": ( + "filename", + open(url, "rb"), + "type=multipart/form-data", + ) + } + ) + headers = {"Content-Type": m.content_type} + resp = requests.post( + self.url, + headers=headers, + data=m, + params=params, + verify=False, + **self.requests_kwargs + ) if resp.status_code == 200: data = resp.json() return SauceNAOResponse(data) diff --git a/PicImageSearch/tracemoe.py b/PicImageSearch/tracemoe.py index 5c2372eb..758e8315 100644 --- a/PicImageSearch/tracemoe.py +++ b/PicImageSearch/tracemoe.py @@ -1,47 +1,51 @@ +from pathlib import Path from typing import List, Optional + import requests from loguru import logger -from pathlib import Path + from .Utils import get_error_message class TraceMoeAnilist: def __init__(self, data): - self.id: int = data['id'] + self.id: int = data["id"] """匹配的Anilist ID见https://anilist.co/""" - self.idMal: int = data['idMal'] + self.idMal: int = data["idMal"] """匹配的MyAnimelist ID见https://myanimelist.net/""" - self.title: dict = data['title'] + self.title: dict = data["title"] """番剧名字""" - self.title_native: str = data['title']['native'] + self.title_native: str = data["title"]["native"] """番剧国际命名""" - self.title_english: str = data['title']['english'] + self.title_english: str = data["title"]["english"] """番剧英文命名""" - self.title_romaji: str = data['title']['romaji'] + self.title_romaji: str = data["title"]["romaji"] """番剧罗马命名""" - self.title_chinese: str = 'NULL' + self.title_chinese: str = "NULL" """番剧中文命名""" - self.synonyms: list = data['synonyms'] + self.synonyms: list = data["synonyms"] """备用英文标题""" - self.isAdult: bool = data['isAdult'] + self.isAdult: bool = data["isAdult"] """是否R18""" - def setChinese(self, data): + def set_chinese(self, data): self.title = data - if 'chinese' in data.keys(): - self.title_chinese: str = data['chinese'] # 番剧中文命名 + if "chinese" in data.keys(): + self.title_chinese: str = data["chinese"] # 番剧中文命名 def __repr__(self): - return f'( ' + return ( + f"( " + ) class TraceMoeNorm: - def __init__(self, data, chineseTitle=True, mute=False, size=None): + def __init__(self, data, chinese_title=True, 
mute=False, size=None): """ :param data: 数据 - :param chineseTitle: 中文番剧名称显示 + :param chinese_title: 中文番剧名称显示 :param mute: 预览视频静音 :param size: 视频与图片大小(s/m/l) """ @@ -51,13 +55,13 @@ def __init__(self, data, chineseTitle=True, mute=False, size=None): """匹配的MyAnimelist ID见https://myanimelist.net/""" self.title: dict = {} """剧名字""" - self.title_native: str = 'NULL' + self.title_native: str = "NULL" """番剧国际命名""" - self.title_english: str = 'NULL' + self.title_english: str = "NULL" """剧英文命名""" - self.title_romaji: str = 'NULL' + self.title_romaji: str = "NULL" """番剧罗马命名""" - self.title_chinese: str = 'NULL' + self.title_chinese: str = "NULL" """番剧中文命名""" self.anilist: Optional[int] = None """匹配的Anilist ID见https://anilist.co/""" @@ -65,38 +69,40 @@ def __init__(self, data, chineseTitle=True, mute=False, size=None): """备用英文标题""" self.isAdult: bool = False """是否R18""" - if type(data['anilist']) == dict: - self.anilist = data['anilist']['id'] # 匹配的Anilist ID见https://anilist.co/ - self.idMal: int = data['anilist']['idMal'] # 匹配的MyAnimelist ID见https://myanimelist.net/ - self.title: dict = data['anilist']['title'] # 番剧名字 - self.title_native: str = data['anilist']['title']['native'] # 番剧国际命名 - self.title_english: str = data['anilist']['title']['english'] # 番剧英文命名 - self.title_romaji: str = data['anilist']['title']['romaji'] # 番剧罗马命名 - self.synonyms: list = data['anilist']['synonyms'] # 备用英文标题 - self.isAdult: bool = data['anilist']['isAdult'] # 是否R18 - if chineseTitle: - self.title_chinese: str = self._getChineseTitle() # 番剧中文命名 + if type(data["anilist"]) == dict: + self.anilist = data["anilist"]["id"] # 匹配的Anilist ID见https://anilist.co/ + self.idMal: int = data["anilist"][ + "idMal" + ] # 匹配的MyAnimelist ID见https://myanimelist.net/ + self.title: dict = data["anilist"]["title"] # 番剧名字 + self.title_native: str = data["anilist"]["title"]["native"] # 番剧国际命名 + self.title_english: str = data["anilist"]["title"]["english"] # 番剧英文命名 + self.title_romaji: str = data["anilist"]["title"]["romaji"] # 番剧罗马命名 + self.synonyms: list = data["anilist"]["synonyms"] # 备用英文标题 + self.isAdult: bool = data["anilist"]["isAdult"] # 是否R18 + if chinese_title: + self.title_chinese: str = self._get_chinese_title() # 番剧中文命名 else: - self.anilist = data['anilist'] # 匹配的Anilist ID见https://anilist.co/ - self.filename: str = data['filename'] + self.anilist = data["anilist"] # 匹配的Anilist ID见https://anilist.co/ + self.filename: str = data["filename"] """找到匹配项的文件名""" - self.episode: int = data['episode'] + self.episode: int = data["episode"] """估计的匹配的番剧的集数""" - self.From: int = data['from'] + self.From: int = data["from"] """匹配场景的开始时间""" - self.To: int = data['to'] + self.To: int = data["to"] """匹配场景的结束时间""" - self.similarity: float = float(data['similarity']) + self.similarity: float = float(data["similarity"]) """相似度,相似性低于 87% 的搜索结果可能是不正确的结果""" - self.video: str = data['video'] + self.video: str = data["video"] """预览视频""" - self.image: str = data['image'] + self.image: str = data["image"] """预览图像""" - if size in ['l', 's', 'm']: # 大小设置 - self.video += '&size=' + size - self.image += '&size=' + size + if size in ["l", "s", "m"]: # 大小设置 + self.video += "&size=" + size + self.image += "&size=" + size if mute: # 视频静音设置 - self.video += '&mute' + self.video += "&mute" # ---------------过时版本----------------------- # self.anilist_id: int = data['anilist_id'] @@ -132,7 +138,7 @@ def __init__(self, data, chineseTitle=True, mute=False, size=None): # url = url + '&mute' # return url - def download_image(self, filename='image.png', path: Path = 
Path.cwd()) -> Path: + def download_image(self, filename="image.png", path: Path = Path.cwd()) -> Path: """ 下载缩略图 @@ -142,12 +148,12 @@ def download_image(self, filename='image.png', path: Path = Path.cwd()) -> Path: """ with requests.get(self.image, stream=True) as resp: endpoint = path.joinpath(filename) - with open(endpoint, 'wb') as fd: + with open(endpoint, "wb") as fd: for chunk in resp.iter_content(): fd.write(chunk) return endpoint - def download_video(self, filename='video.mp4', path: Path = Path.cwd()) -> Path: + def download_video(self, filename="video.mp4", path: Path = Path.cwd()) -> Path: """ 下载预览视频 @@ -158,23 +164,25 @@ def download_video(self, filename='video.mp4', path: Path = Path.cwd()) -> Path: """ with requests.get(self.video, stream=True) as resp: endpoint = path.joinpath(filename) - with open(endpoint, 'wb') as fd: + with open(endpoint, "wb") as fd: for chunk in resp.iter_content(): fd.write(chunk) return endpoint - def _getChineseTitle(self): - return self.animeTitle(self.origin['anilist']['id'])['data']['Media']['title']['chinese'] + def _get_chinese_title(self): + return self.get_anime_title(self.origin["anilist"]["id"])["data"]["Media"][ + "title" + ]["chinese"] @staticmethod - def animeTitle(anilistID: int) -> dict: + def get_anime_title(anilist_id: int) -> dict: """获取中文标题 - :param anilistID: id + :param anilist_id: id :return: dict """ - query = ''' + query = """ query ($id: Int) { # Define which variables will be used in the query (id) Media (id: $id, type: ANIME) { # Insert our variables into the query arguments (id) (type: ANIME is hard-coded in the query) id @@ -185,37 +193,37 @@ def animeTitle(anilistID: int) -> dict: } } } - ''' + """ # Define our query variables and values that will be used in the query request - variables = { - 'id': anilistID - } + variables = {"id": anilist_id} - url = 'https://trace.moe/anilist/' + url = "https://trace.moe/anilist/" # Make the HTTP Api request - response = requests.post(url, json={'query': query, 'variables': variables}) + response = requests.post(url, json={"query": query, "variables": variables}) return response.json() def __repr__(self): - return f'' + return f"" class TraceMoeResponse: - def __init__(self, resp, chineseTitle, mute, size): + def __init__(self, resp, chinese_title, mute, size): self.origin: dict = resp """原始数据""" self.raw: List[TraceMoeNorm] = list() """结果返回值""" - resp_docs = resp['result'] + resp_docs = resp["result"] for i in resp_docs: - self.raw.append(TraceMoeNorm(i, chineseTitle=chineseTitle, mute=mute, size=size)) + self.raw.append( + TraceMoeNorm(i, chinese_title=chinese_title, mute=mute, size=size) + ) self.count: int = len(self.raw) """搜索结果数量""" - self.frameCount: int = resp['frameCount'] + self.frameCount: int = resp["frameCount"] """搜索的帧总数""" - self.error: str = resp['error'] + self.error: str = resp["error"] """错误报告""" # ---------------过时版本----------------------- # self.RawDocsSearchTime: int = resp['RawDocsSearchTime'] # 从数据库检索帧所用的时间 @@ -228,7 +236,7 @@ def __init__(self, resp, chineseTitle, mute, size): # self.quota_ttl: int = resp['quota_ttl'] # 配额重置之前的时间(秒) def __repr__(self): - return f'' + return f"" class TraceMoe: - TraceMoeURL = 'https://api.trace.moe/search' - MeURL = 'https://api.trace.moe/me' + TraceMoeURL = "https://api.trace.moe/search" + MeURL = "https://api.trace.moe/me" def __init__(self, mute=False, size=None, **requests_kwargs): """主类 @@ -269,8 +277,10 @@ def me(self, key=None) -> TraceMoeMe: # 获取自己的信息 try: params = None if key: - params = {'key': key} - res = 
requests.get(self.MeURL, params=params, verify=False, **self.requests_kwargs) + params = {"key": key} + res = requests.get( + self.MeURL, params=params, verify=False, **self.requests_kwargs + ) if res.status_code == 200: data = res.json() return TraceMoeMe(data) @@ -280,55 +290,72 @@ def me(self, key=None) -> TraceMoeMe: # 获取自己的信息 logger.info(e) @staticmethod - def _firstIf(param): + def _first_if(param): if param != "": param += "&" return param @staticmethod - def setParams(url, anilistID, anilistInfo, cutBorders): + def set_params(url, anilist_id, anilist_info, cut_borders): params = {} - if anilistInfo: + if anilist_info: params["anilistInfo"] = True - if cutBorders: + if cut_borders: params["cutBorders"] = True - if anilistID: - params["anilistID"] = anilistID + if anilist_id: + params["anilistID"] = anilist_id if url: params["url"] = url return params - def search(self, url, key=None, anilistID=None, - chineseTitle=True, anilistInfo=True, cutBorders=True) -> TraceMoeResponse: + def search( + self, + url, + key=None, + anilist_id=None, + chinese_title=True, + anilist_info=True, + cut_borders=True, + ) -> TraceMoeResponse: """识别图片 :param key: API密钥 https://soruly.github.io/trace.moe-api/#/limits?id=api-search-quota-and-limits :param url: 网络地址(http或https链接)或本地(本地图片路径) When using video / gif, only the 1st frame would be extracted for searching. - :param anilistID: 搜索限制为特定的 Anilist ID(默认无) - :param anilistInfo: 详细信息(默认开启) - :param chineseTitle: 中文番剧标题 - :param cutBorders: 切割黑边框(默认开启) + :param anilist_id: 搜索限制为特定的 Anilist ID(默认无) + :param anilist_info: 详细信息(默认开启) + :param chinese_title: 中文番剧标题 + :param cut_borders: 切割黑边框(默认开启) """ try: headers = None if headers: headers = {"x-trace-key": key} - if url[:4] == 'http': # 网络url - params = self.setParams(url, anilistID, anilistInfo, cutBorders) - res = requests.get(self.TraceMoeURL, headers=headers, params=params, verify=False, - **self.requests_kwargs) + if url[:4] == "http": # 网络url + params = self.set_params(url, anilist_id, anilist_info, cut_borders) + res = requests.get( + self.TraceMoeURL, + headers=headers, + params=params, + verify=False, + **self.requests_kwargs, + ) if res.status_code == 200: data = res.json() - return TraceMoeResponse(data, chineseTitle, self.mute, self.size) + return TraceMoeResponse(data, chinese_title, self.mute, self.size) else: logger.error(get_error_message(res.status_code)) else: # 是否是本地文件 - params = self.setParams(None, anilistID, anilistInfo, cutBorders) - res = requests.post(self.TraceMoeURL, headers=headers, params=params, - files={"image": open(url, "rb")}, **self.requests_kwargs) + params = self.set_params(None, anilist_id, anilist_info, cut_borders) + res = requests.post( + self.TraceMoeURL, + headers=headers, + params=params, + files={"image": open(url, "rb")}, + **self.requests_kwargs, + ) if res.status_code == 200: data = res.json() - return TraceMoeResponse(data, chineseTitle, self.mute, self.size) + return TraceMoeResponse(data, chinese_title, self.mute, self.size) else: logger.error(get_error_message(res.status_code)) except Exception as e: diff --git a/docs/Google/Demo.md b/docs/Google/Demo.md index cf1874de..61ddfd3f 100644 --- a/docs/Google/Demo.md +++ b/docs/Google/Demo.md @@ -12,8 +12,8 @@ res = google.search("https://media.discordapp.net/attachments/783138508038471701 logger.info(res.origin) # Original Data logger.info(res.raw.__str__()) # Raw Data # Should start from index 2, because from there is matching image -logger.info(res.raw[2]) # -logger.info(res.raw[2].thumbnail[0]) # No 
directable url +logger.info(res.raw[2]) # +logger.info(res.raw[2].thumbnail[0]) # No detectable url logger.info(res.raw[2].titles[0]) # The Strongest Dull Prince's Secret Battle for the Throne ... logger.info(res.raw[2].urls[0]) # https://kiryuu.co/the-strongest-dull-princes-secret-battle-for-the-throne-chapter-3-bahasa-indonesia/ ``` @@ -29,9 +29,9 @@ logger.info(res.raw[2].urls[0]) # https://kiryuu.co/the-strongest-dull-princes- 程序输出,部分过长数据已省略 ```shell 2021-03-06 19:54:45.528 | INFO | __main__:<module>:8 - [
...(raw text of the matched "Web results" blocks elided)...
] - 2021-03-06 19:54:45.537 | INFO | __main__:<module>:9 - [, , , , , , ] - 2021-03-06 19:54:45.538 | INFO | __main__:<module>:11 - - 2021-03-06 19:54:45.538 | INFO | __main__:<module>:12 - No directable url + 2021-03-06 19:54:45.537 | INFO | __main__:<module>:9 - [, , , , , , ] + 2021-03-06 19:54:45.538 | INFO | __main__:<module>:11 - + 2021-03-06 19:54:45.538 | INFO | __main__:<module>:12 - No detectable url 2021-03-06 19:54:45.538 | INFO | __main__:<module>:13 - Chapter 3 | The Strongest Dull Prince's Secret Battle for the ... 2021-03-06 19:54:45.538 | INFO | __main__:<module>:14 - https://rawdevart.com/comic/the-strongest-dull-princes-secret-battle-for-the-throne/chapter-3/ ``` \ No newline at end of file diff --git a/setup.py b/setup.py index 65e774b9..10b8e0e3 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,16 @@ import setuptools -requirements = [requirement.strip() for requirement in open('requirements.txt', 'r', encoding='utf-8').readlines()] +requirements = [ + requirement.strip() + for requirement in open("requirements.txt", "r", encoding="utf-8").readlines() +] -with open("README.md", "r", encoding='utf-8') as fh: +with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setuptools.setup( name="PicImageSearch", - version='2.2.7', + version="2.2.7", author="kitUIN", author_email="kulujun@gmail.com", description="PicImageSearch APIs for Python 3.x 适用于 Python 3 以图搜源整合API", @@ -19,6 +22,7 @@ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", - ], install_requires=requirements, - python_requires='>=3.6', + ], + install_requires=requirements, + python_requires=">=3.6", ) diff --git a/test/Async/test3.py b/test/Async/ascii2d_test.py similarity index 52% rename from test/Async/test3.py rename to test/Async/ascii2d_test.py index d94d14f1..cd96a090 100644 --- a/test/Async/test3.py +++ b/test/Async/ascii2d_test.py @@ -1,22 +1,27 @@ import asyncio from loguru import logger - -from PicImageSearch import NetWork, AsyncAscii2D +from PicImageSearch import AsyncAscii2D, NetWork async def main(): async with NetWork() as client: ascii2d = AsyncAscii2D(client=client, bovw=True) - res = await ascii2d.search('https://pixiv.cat/77702503-1.jpg') + res = await ascii2d.search("https://pixiv.cat/77702503-1.jpg") # res = await ascii2d.search(r'C:/kitUIN/img/tinted-good.jpg') # 搜索本地图片 # logger.info(res.origin) # 原始数据 # logger.info(res.raw) # - logger.info(res.raw[1]) # - logger.info(res.raw[1].thumbnail) # https://ascii2d.net/thumbnail/2/c/5/e/2c5e6a18fbba730a65cef0549e3c5768.jpg + logger.info( + res.raw[1] + ) # + logger.info( + res.raw[1].thumbnail + ) # https://ascii2d.net/thumbnail/2/c/5/e/2c5e6a18fbba730a65cef0549e3c5768.jpg logger.info(res.raw[1].title) # 2020.08.30 logger.info(res.raw[1].authors) # hews__ - logger.info(res.raw[1].url) # https://twitter.com/hews__/status/1299728221005643776 + logger.info( + res.raw[1].url + ) # https://twitter.com/hews__/status/1299728221005643776 logger.info(res.raw[1].detail) # 2570x4096 JPEG 1087.9KB diff --git a/test/Async/test6.py b/test/Async/baidu_test.py similarity index 60% rename from test/Async/test6.py rename to test/Async/baidu_test.py index 9b4d9f43..aeb741b6 100644 --- a/test/Async/test6.py +++ b/test/Async/baidu_test.py @@ -1,15 +1,18 @@ import asyncio + from loguru import logger from PicImageSearch import AsyncBaiDu, NetWork async def main(): - async with NetWork(proxy='http://127.0.0.1:10809') as client: + async with NetWork(proxy="http://127.0.0.1:10809") as client: baidu = AsyncBaiDu(client=client) - res = await 
baidu.search('https://i0.hdslb.com/bfs/article/e756dd0a8375a4c30cc0ee3a51c8067157486135.jpg@1524w_856h.webp') + res = await baidu.search( + "https://i0.hdslb.com/bfs/article/e756dd0a8375a4c30cc0ee3a51c8067157486135.jpg@1524w_856h.webp" + ) logger.info(res.item) logger.info(res.origin) - if hasattr(res, 'same'): # 存在来源结果 + if hasattr(res, "same"): # 存在来源结果 logger.info(res.raw[0].page_title) logger.info(res.raw[0].abstract) logger.info(res.raw[0].url) @@ -17,5 +20,6 @@ async def main(): else: logger.info(res.similar) + loop = asyncio.new_event_loop() -loop.run_until_complete(main()) \ No newline at end of file +loop.run_until_complete(main()) diff --git a/test/Async/google_test.py b/test/Async/google_test.py new file mode 100644 index 00000000..11c476de --- /dev/null +++ b/test/Async/google_test.py @@ -0,0 +1,30 @@ +import asyncio + +from loguru import logger +from PicImageSearch import AsyncGoogle, NetWork + + +async def main(): + async with NetWork(proxy="http://127.0.0.1:10809") as client: + google = AsyncGoogle(client=client) + res = await google.search( + "https://media.discordapp.net/attachments/783138508038471701/813452582948306974/hl-18-1-900x1280.png?width=314&height=447" + ) + # res = await google.search(r'C:/kitUIN/img/tinted-good.jpg') # Search Image URL or path + logger.info(res.origin) # Original Data + logger.info(res.raw.__str__()) # Raw Data + # Should start from index 2, because from there is matching image + logger.info( + res.raw[2] + ) # + logger.info(res.raw[2].thumbnail) # No detectable url + logger.info( + res.raw[2].title + ) # The Strongest Dull Prince's Secret Battle for the Throne ... + logger.info( + res.raw[2].url + ) # https://kiryuu.co/the-strongest-dull-princes-secret-battle-for-the-throne-chapter-3-bahasa-indonesia/ + + +loop = asyncio.new_event_loop() +loop.run_until_complete(main()) diff --git a/test/Async/test4.py b/test/Async/iqdb_test.py similarity index 95% rename from test/Async/test4.py rename to test/Async/iqdb_test.py index 98f0e698..8213245d 100644 --- a/test/Async/test4.py +++ b/test/Async/iqdb_test.py @@ -1,8 +1,7 @@ import asyncio from loguru import logger - -from PicImageSearch import NetWork, AsyncIqdb +from PicImageSearch import AsyncIqdb, NetWork async def main(): @@ -24,7 +23,6 @@ async def main(): logger.info("Google搜图链接: " + res.google) logger.info("相似度低的结果: " + str(res.more)) + loop = asyncio.new_event_loop() loop.run_until_complete(main()) - - diff --git a/test/Async/test2.py b/test/Async/saucenao_test.py similarity index 56% rename from test/Async/test2.py rename to test/Async/saucenao_test.py index 43d151f9..ccd5bf5c 100644 --- a/test/Async/test2.py +++ b/test/Async/saucenao_test.py @@ -1,26 +1,30 @@ import asyncio from loguru import logger - from PicImageSearch import AsyncSauceNAO, NetWork async def main(): - async with NetWork(proxy='http://127.0.0.1:10809') as client: - saucenao = AsyncSauceNAO(api_key='54a8d90c583d3b66b6dd3d7e9001a39b588cd842', client=client) - res = await saucenao.search('https://pixiv.cat/77702503-1.jpg') + async with NetWork(proxy="http://127.0.0.1:10809") as client: + saucenao = AsyncSauceNAO( + api_key="54a8d90c583d3b66b6dd3d7e9001a39b588cd842", client=client + ) + res = await saucenao.search("https://pixiv.cat/77702503-1.jpg") # res = await saucenao.search(r'C:\Users\kulujun\Pictures\90139691_p0.png') #搜索本地图片 logger.info(res.origin) # 原始数据 logger.info(res.raw) # logger.info(res.raw[0]) # logger.info(res.long_remaining) # 99 logger.info(res.short_remaining) # 3 - logger.info(res.raw[ - 0].thumbnail) # 
https://img1.saucenao.com/res/pixiv/7770/77702503_p0_master1200.jpg?auth=pJmiu8qNI1z2fLBAlAsx7A&exp=1604748473 + logger.info( + res.raw[0].thumbnail + ) # https://img1.saucenao.com/res/pixiv/7770/77702503_p0_master1200.jpg?auth=pJmiu8qNI1z2fLBAlAsx7A&exp=1604748473 logger.info(res.raw[0].similarity) # 92.22 logger.info(res.raw[0].title) # MDR♡ logger.info(res.raw[0].author) # CeNanGam - logger.info(res.raw[0].url) # https://www.pixiv.net/member_illust.php?mode=medium&illust_id=77702503 + logger.info( + res.raw[0].url + ) # https://www.pixiv.net/member_illust.php?mode=medium&illust_id=77702503 logger.info(res.raw[0].pixiv_id) # 77702503 logger.info(res.raw[0].member_id) # 4089680 diff --git a/test/Async/test5.py b/test/Async/test5.py deleted file mode 100644 index 6dd2c7cb..00000000 --- a/test/Async/test5.py +++ /dev/null @@ -1,23 +0,0 @@ -import asyncio - -from loguru import logger - -from PicImageSearch import AsyncGoogle, NetWork - - -async def main(): - async with NetWork(proxy='http://127.0.0.1:10809') as client: - google = AsyncGoogle(client=client) - res = await google.search("https://media.discordapp.net/attachments/783138508038471701/813452582948306974/hl-18-1-900x1280.png?width=314&height=447") - # res = await google.search(r'C:/kitUIN/img/tinted-good.jpg') # Search Image URL or path - logger.info(res.origin) # Original Data - logger.info(res.raw.__str__()) # Raw Data - # Should start from index 2, because from there is matching image - logger.info(res.raw[2]) # - logger.info(res.raw[2].thumbnail) # No directable url - logger.info(res.raw[2].title) # The Strongest Dull Prince's Secret Battle for the Throne ... - logger.info(res.raw[2].url) # https://kiryuu.co/the-strongest-dull-princes-secret-battle-for-the-throne-chapter-3-bahasa-indonesia/ - - -loop = asyncio.new_event_loop() -loop.run_until_complete(main()) \ No newline at end of file diff --git a/test/Async/test1.py b/test/Async/tracemoe_test.py similarity index 88% rename from test/Async/test1.py rename to test/Async/tracemoe_test.py index 12f9a824..1a0c510e 100644 --- a/test/Async/test1.py +++ b/test/Async/tracemoe_test.py @@ -1,14 +1,13 @@ import asyncio from loguru import logger - -from PicImageSearch import NetWork, AsyncTraceMoe +from PicImageSearch import AsyncTraceMoe, NetWork async def main(): async with NetWork() as client: tracemoe = AsyncTraceMoe(mute=False, size=None, client=client) - res = await tracemoe.search('https://trace.moe/img/tinted-good.jpg') # 搜索网络图片 + res = await tracemoe.search("https://trace.moe/img/tinted-good.jpg") # 搜索网络图片 # res = await tracemoe.search(r'C:/Users/kulujun/Pictures/1.png') # 搜索本地图片 logger.info(res.origin) logger.info(res.raw) diff --git a/test/ascii2d_test.py b/test/ascii2d_test.py new file mode 100644 index 00000000..f18a1d1f --- /dev/null +++ b/test/ascii2d_test.py @@ -0,0 +1,18 @@ +from loguru import logger +from PicImageSearch import Ascii2D + +ascii2d = Ascii2D(bovw=False) +res = ascii2d.search("https://pixiv.cat/77702503-1.jpg") +# res = ascii2d.search(r'C:/kitUIN/img/tinted-good.jpg') # 搜索本地图片 +# logger.info(res.origin) # 原始数据 +# logger.info(res.raw) # +logger.info( + res.raw[1] +) # +logger.info( + res.raw[1].thumbnail +) # https://ascii2d.net/thumbnail/2/c/5/e/2c5e6a18fbba730a65cef0549e3c5768.jpg +logger.info(res.raw[1].title) # 2020.08.30 +logger.info(res.raw[1].authors) # hews__ +logger.info(res.raw[1].url) # https://twitter.com/hews__/status/1299728221005643776 +logger.info(res.raw[1].detail) # 2570x4096 JPEG 1087.9KB diff --git a/test/test6.py b/test/baidu_test.py 
similarity index 64% rename from test/test6.py rename to test/baidu_test.py index 05c2ebea..5c96c57f 100644 --- a/test/test6.py +++ b/test/baidu_test.py @@ -1,11 +1,13 @@ -from PicImageSearch import BaiDu from loguru import logger +from PicImageSearch import BaiDu baidu = BaiDu() -res = baidu.search('https://i0.hdslb.com/bfs/article/e756dd0a8375a4c30cc0ee3a51c8067157486135.jpg@1524w_856h.webp') +res = baidu.search( + "https://i0.hdslb.com/bfs/article/e756dd0a8375a4c30cc0ee3a51c8067157486135.jpg@1524w_856h.webp" +) logger.info(res.item) logger.info(res.origin) -if hasattr(res, 'same'): # 存在来源结果 +if hasattr(res, "same"): # 存在来源结果 logger.info(res.raw[0].page_title) logger.info(res.raw[0].abstract) logger.info(res.raw[0].url) diff --git a/test/google_test.py b/test/google_test.py new file mode 100644 index 00000000..b5872e18 --- /dev/null +++ b/test/google_test.py @@ -0,0 +1,27 @@ +from loguru import logger +from PicImageSearch import Google + +google = Google() +res = google.search( + "https://media.discordapp.net/attachments/783138508038471701/813452582948306974/hl-18-1-900x1280.png?width=314&height=447" +) +# res = google.search(r'C:/kitUIN/img/tinted-good.jpg') # Search Image URL or path +logger.info(res.origin) # Original Data + +# Should start from index 2, because from there is matching image +logger.info( + res.raw[4] +) # +logger.info(res.raw[4].thumbnail) # No detectable url +logger.info( + res.raw[4].title +) # The Strongest Dull Prince's Secret Battle for the Throne ... +logger.info( + res.raw[4].url +) # https://kiryuu.co/the-strongest-dull-princes-secret-battle-for-the-throne-chapter-3-bahasa-indonesia/ +logger.info(res.page) +res2 = google.goto_page(res.get_page_url(2), 2) +logger.info(res2.raw[2]) +logger.info(res2.raw[2].thumbnail) +logger.info(res2.raw[2].title) +logger.info(res2.raw[2].url) diff --git a/test/test4_3d.py b/test/iqdb_3d_test.py similarity index 76% rename from test/test4_3d.py rename to test/iqdb_3d_test.py index 9344a41d..fa5682a0 100644 --- a/test/test4_3d.py +++ b/test/iqdb_3d_test.py @@ -1,14 +1,16 @@ from loguru import logger from PicImageSearch import Iqdb +# 如果需要代理 _REQUESTS_KWARGS = { - 'proxies': { - 'https': 'http://127.0.0.1:8888', - } - # 如果需要代理 + "proxies": { + "https": "http://127.0.0.1:8888", + } } iqdb = Iqdb() -res = iqdb.search_3d('http://3d.iqdb.org/3dbooru/2/8/6/2865ab9c1d9fe8860892945e79435219.jpg') +res = iqdb.search_3d( + "https://3d.iqdb.org/3dbooru/2/8/6/2865ab9c1d9fe8860892945e79435219.jpg" +) # logger.info(res.origin) # logger.info(res.raw) @@ -20,5 +22,3 @@ logger.info("图片来源: " + res.raw[0].source) logger.info("其他图片来源: " + str(res.raw[0].other_source)) logger.info("相似度低的结果: " + str(res.more)) - - diff --git a/test/test4.py b/test/iqdb_test.py similarity index 94% rename from test/test4.py rename to test/iqdb_test.py index 1e6297e7..4a276e34 100644 --- a/test/test4.py +++ b/test/iqdb_test.py @@ -8,7 +8,7 @@ # 如果需要代理 } iqdb = Iqdb() -res = iqdb.search(r'https://pixiv.cat/77702503-1.jpg') +res = iqdb.search(r"https://pixiv.cat/77702503-1.jpg") # logger.info(res.origin) # logger.info(res.raw) @@ -24,6 +24,3 @@ logger.info("TinEye搜图链接: " + res.tineye) logger.info("Google搜图链接: " + res.google) logger.info("相似度低的结果: " + str(res.more)) - - - diff --git a/test/test2.py b/test/saucenao_test.py similarity index 50% rename from test/test2.py rename to test/saucenao_test.py index debb0804..044f7b13 100644 --- a/test/test2.py +++ b/test/saucenao_test.py @@ -1,5 +1,4 @@ from loguru import logger - from PicImageSearch import SauceNAO 
_REQUESTS_KWARGS = { @@ -8,18 +7,22 @@ # } # 如果需要代理 } -saucenao = SauceNAO(api_key='54a8d90c583d3b66b6dd3d7e9001a39b588cd842') -res = saucenao.search('https://pixiv.cat/77702503-1.jpg') -#res = saucenao.search(r'C:/kitUIN/img/tinted-good.jpg') #搜索本地图片 +saucenao = SauceNAO(api_key="54a8d90c583d3b66b6dd3d7e9001a39b588cd842") +res = saucenao.search("https://pixiv.cat/77702503-1.jpg") +# res = saucenao.search(r'C:/kitUIN/img/tinted-good.jpg') #搜索本地图片 logger.info(res.origin) # 原始数据 logger.info(res.raw) # logger.info(res.raw[0]) # logger.info(res.long_remaining) # 99 logger.info(res.short_remaining) # 3 -logger.info(res.raw[0].thumbnail) # https://img1.saucenao.com/res/pixiv/7770/77702503_p0_master1200.jpg?auth=pJmiu8qNI1z2fLBAlAsx7A&exp=1604748473 +logger.info( + res.raw[0].thumbnail +) # https://img1.saucenao.com/res/pixiv/7770/77702503_p0_master1200.jpg?auth=pJmiu8qNI1z2fLBAlAsx7A&exp=1604748473 logger.info(res.raw[0].similarity) # 92.22 logger.info(res.raw[0].title) # MDR♡ logger.info(res.raw[0].author) # CeNanGam -logger.info(res.raw[0].url) # https://www.pixiv.net/member_illust.php?mode=medium&illust_id=77702503 +logger.info( + res.raw[0].url +) # https://www.pixiv.net/member_illust.php?mode=medium&illust_id=77702503 logger.info(res.raw[0].pixiv_id) # 77702503 -logger.info(res.raw[0].member_id) # 4089680 \ No newline at end of file +logger.info(res.raw[0].member_id) # 4089680 diff --git a/test/test3.py b/test/test3.py deleted file mode 100644 index f86f8b01..00000000 --- a/test/test3.py +++ /dev/null @@ -1,15 +0,0 @@ -from loguru import logger - -from PicImageSearch import Ascii2D - -ascii2d = Ascii2D(bovw=False) -res = ascii2d.search('https://pixiv.cat/77702503-1.jpg') -#res = ascii2d.search(r'C:/kitUIN/img/tinted-good.jpg') # 搜索本地图片 -#logger.info(res.origin) # 原始数据 -#logger.info(res.raw) # -logger.info(res.raw[1]) # -logger.info(res.raw[1].thumbnail) # https://ascii2d.net/thumbnail/2/c/5/e/2c5e6a18fbba730a65cef0549e3c5768.jpg -logger.info(res.raw[1].title) # 2020.08.30 -logger.info(res.raw[1].authors) # hews__ -logger.info(res.raw[1].url) # https://twitter.com/hews__/status/1299728221005643776 -logger.info(res.raw[1].detail) # 2570x4096 JPEG 1087.9KB diff --git a/test/test5.py b/test/test5.py deleted file mode 100644 index 2c46f179..00000000 --- a/test/test5.py +++ /dev/null @@ -1,23 +0,0 @@ -from loguru import logger - -from PicImageSearch import Google - -google = Google() -res = google.search( - "https://media.discordapp.net/attachments/783138508038471701/813452582948306974/hl-18-1-900x1280.png?width=314&height=447") -# res = google.search(r'C:/kitUIN/img/tinted-good.jpg') # Search Image URL or path -logger.info(res.origin) # Original Data - -# Should start from index 2, because from there is matching image -logger.info(res.raw[ - 4]) # -logger.info(res.raw[4].thumbnail) # No directable url -logger.info(res.raw[4].title) # The Strongest Dull Prince's Secret Battle for the Throne ... 
-logger.info(res.raw[ - 4].url) # https://kiryuu.co/the-strongest-dull-princes-secret-battle-for-the-throne-chapter-3-bahasa-indonesia/ -logger.info(res.page) -res2 = google.goto_page(res.get_page_url(2), 2) -logger.info(res2.raw[2]) -logger.info(res2.raw[2].thumbnail) -logger.info(res2.raw[2].title) -logger.info(res2.raw[2].url) \ No newline at end of file diff --git a/test/test1.py b/test/tracemoe_test.py similarity index 97% rename from test/test1.py rename to test/tracemoe_test.py index ec057423..0e5c7ed3 100644 --- a/test/test1.py +++ b/test/tracemoe_test.py @@ -8,7 +8,7 @@ # 如果需要代理 } tracemoe = TraceMoe(mute=False, size=None) -res = tracemoe.search('https://trace.moe/img/tinted-good.jpg') # 搜索网络图片 +res = tracemoe.search("https://trace.moe/img/tinted-good.jpg") # 搜索网络图片 # res = tracemoe.search(r'C:/Users/kulujun/Pictures/1.png') # 搜索本地图片
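For quick reference, a minimal sketch of the renamed snake_case keyword arguments after this patch (`anilist_id`, `chinese_title`, `anilist_info`, `cut_borders` replacing `anilistID`, `chineseTitle`, `anilistInfo`, `cutBorders`), reusing the same sample image as the tests above:

```python
from loguru import logger
from PicImageSearch import TraceMoe

# 与补丁后的 test/tracemoe_test.py 相同的同步用法,改用新的 snake_case 关键字参数
tracemoe = TraceMoe(mute=False, size=None)
res = tracemoe.search(
    "https://trace.moe/img/tinted-good.jpg",  # 搜索网络图片
    chinese_title=True,  # 原 chineseTitle
    anilist_info=True,  # 原 anilistInfo
    cut_borders=True,  # 原 cutBorders
)
logger.info(res.raw[0].title_native)  # 番剧国际命名
logger.info(res.raw[0].similarity)  # 相似度,低于 87% 的结果可能不正确
```

The async classes take the same renamed arguments; only construction differs (`AsyncTraceMoe(mute=False, size=None, client=client)` inside `async with NetWork() as client:`), as `test/Async/tracemoe_test.py` shows.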