
Commit

refactor: Type hinting update
SilverRainZ committed Oct 19, 2024
1 parent b70dc37 commit efda73a
Showing 9 changed files with 50 additions and 62 deletions.
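
The change applied across these files is the same throughout: typing.List, typing.Tuple, typing.Dict and typing.Optional are replaced by the built-in generics of PEP 585 (list, tuple, dict) and the PEP 604 union syntax (X | None). A minimal before/after sketch of the pattern, using hypothetical names rather than code from this repository:

    # Before: typing-module generics, as used prior to this commit.
    from typing import Dict, List, Optional, Tuple

    def lookup(index: Dict[str, Tuple[int, int]], keys: List[str]) -> Optional[int]:
        for key in keys:
            if key in index:
                return index[key][0]
        return None

    # After: built-in generics (PEP 585) and union syntax (PEP 604).
    def lookup(index: dict[str, tuple[int, int]], keys: list[str]) -> int | None:
        for key in keys:
            if key in index:
                return index[key][0]
        return None

Once nothing in a module references the typing names any more, the import becomes unnecessary, which is why most files below also drop a "from typing import ..." line.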
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -27,7 +27,7 @@ classifiers = [
"Topic :: Utilities",
]

-requires-python = ">=3.8"
+requires-python = ">=3.12"
dependencies = [
"Sphinx >= 4",
"langid",
31 changes: 15 additions & 16 deletions src/sphinxnotes/snippet/cache.py
@@ -6,7 +6,6 @@
"""

from __future__ import annotations
-from typing import List, Tuple, Dict, Optional
from dataclasses import dataclass

from .snippets import Snippet
@@ -18,25 +17,25 @@ class Item(object):
"""Item of snippet cache."""

snippet: Snippet
-tags: List[str]
+tags: list[str]
excerpt: str
-titlepath: List[str]
-keywords: List[str]
+titlepath: list[str]
+keywords: list[str]


-DocID = Tuple[str, str] # (project, docname)
+DocID = tuple[str, str] # (project, docname)
IndexID = str # UUID
-Index = Tuple[str, str, List[str], List[str]] # (tags, excerpt, titlepath, keywords)
+Index = tuple[str, str, list[str], list[str]] # (tags, excerpt, titlepath, keywords)


class Cache(PDict):
"""A DocID -> List[Item] Cache."""
"""A DocID -> list[Item] Cache."""

-indexes: Dict[IndexID, Index]
-index_id_to_doc_id: Dict[IndexID, Tuple[DocID, int]]
-doc_id_to_index_ids: Dict[DocID, List[IndexID]]
-num_snippets_by_project: Dict[str, int]
-num_snippets_by_docid: Dict[DocID, int]
+indexes: dict[IndexID, Index]
+index_id_to_doc_id: dict[IndexID, tuple[DocID, int]]
+doc_id_to_index_ids: dict[DocID, list[IndexID]]
+num_snippets_by_project: dict[str, int]
+num_snippets_by_docid: dict[DocID, int]

def __init__(self, dirname: str) -> None:
self.indexes = {}
@@ -46,7 +45,7 @@ def __init__(self, dirname: str) -> None:
self.num_snippets_by_docid = {}
super().__init__(dirname)

-def post_dump(self, key: DocID, items: List[Item]) -> None:
+def post_dump(self, key: DocID, items: list[Item]) -> None:
"""Overwrite PDict.post_dump."""

# Remove old indexes and index IDs if exists
@@ -74,7 +73,7 @@ def post_dump(self, key: DocID, items: List[Item]) -> None:
self.num_snippets_by_docid[key] = 0
self.num_snippets_by_docid[key] += len(items)

-def post_purge(self, key: DocID, items: List[Item]) -> None:
+def post_purge(self, key: DocID, items: list[Item]) -> None:
"""Overwrite PDict.post_purge."""

# Purge indexes
@@ -90,7 +89,7 @@ def post_purge(self, key: DocID, items: List[Item]) -> None:
if self.num_snippets_by_docid[key] == 0:
del self.num_snippets_by_docid[key]

-def get_by_index_id(self, key: IndexID) -> Optional[Item]:
+def get_by_index_id(self, key: IndexID) -> Item | None:
"""Like get(), but use IndexID as key."""
doc_id, item_index = self.index_id_to_doc_id.get(key, (None, None))
if not doc_id:
@@ -103,6 +102,6 @@ def gen_index_id(self) -> str:

return uuid.uuid4().hex[:7]

-def stringify(self, key: DocID, items: List[Item]) -> str:
+def stringify(self, key: DocID, items: list[Item]) -> str:
"""Overwrite PDict.stringify."""
return key[1]
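
A note on the aliases above: DocID and Index are plain module-level assignments, so tuple[str, str] and list[str] are evaluated at import time. from __future__ import annotations (PEP 563) only defers evaluation of annotations, not of these expressions, and subscripting the built-in types requires Python 3.9+ at runtime (the X | Y union syntax requires 3.10+). That is consistent with the requires-python bump in pyproject.toml above. A small self-contained sketch of the distinction, using hypothetical names rather than project code:

    from __future__ import annotations

    # Deferred: with PEP 563 this annotation is kept as a string and never
    # evaluated at import time.
    def join_titles(path: list[str]) -> str:
        return ' / '.join(path)

    # Not deferred: an alias is an ordinary assignment, evaluated immediately.
    # tuple[str, str] needs Python 3.9+, str | None needs Python 3.10+.
    DocKey = tuple[str, str]
    MaybeTitle = str | None

    print(join_titles(['Docs', 'Usage']))  # Docs / Usage
    print(DocKey)                          # tuple[str, str]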
6 changes: 3 additions & 3 deletions src/sphinxnotes/snippet/cli.py
@@ -16,7 +16,7 @@
import os
from os import path
import argparse
-from typing import List, Iterable, Tuple
+from typing import Iterable
from textwrap import dedent
from shutil import get_terminal_size
import posixpath
@@ -51,7 +51,7 @@ def get_integration_file(fn: str) -> str:
return path.join(prefix, 'integration', fn)


-def main(argv: List[str] = sys.argv[1:]):
+def main(argv: list[str] = sys.argv[1:]):
"""Command line entrypoint."""

parser = argparse.ArgumentParser(
@@ -237,7 +237,7 @@ def _on_command_stat(args: argparse.Namespace):

def _filter_list_items(
cache: Cache, tags: str, docname_glob: str
-) -> Iterable[Tuple[IndexID, Index]]:
+) -> Iterable[tuple[IndexID, Index]]:
# NOTE: Importing is slow, do it on demand.
from sphinx.util.matching import patmatch

18 changes: 9 additions & 9 deletions src/sphinxnotes/snippet/ext.py
@@ -9,7 +9,7 @@
"""

from __future__ import annotations
-from typing import List, Set, TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING
import re
from os import path
import time
@@ -56,7 +56,7 @@ def extract_excerpt(s: Snippet) -> str:
return ''


-def extract_keywords(s: Snippet) -> List[str]:
+def extract_keywords(s: Snippet) -> list[str]:
keywords = [s.docname]
# TODO: Deal with more snippet
if isinstance(s, WithTitle) and s.title is not None:
@@ -65,8 +65,8 @@ def extract_keywords(s: Snippet) -> List[str]:


def is_document_matched(
-pats: Dict[str, List[str]], docname: str
-) -> Dict[str, List[str]]:
+pats: dict[str, list[str]], docname: str
+) -> dict[str, list[str]]:
"""Whether the docname matched by given patterns pats"""
new_pats = {}
for tag, ps in pats.items():
@@ -76,7 +76,7 @@
return new_pats


-def is_snippet_matched(pats: Dict[str, List[str]], s: [Snippet], docname: str) -> bool:
+def is_snippet_matched(pats: dict[str, list[str]], s: [Snippet], docname: str) -> bool:
"""Whether the snippet's tags and docname matched by given patterns pats"""
if '*' in pats: # Wildcard
for pat in pats['*']:
@@ -108,10 +108,10 @@ def on_config_inited(app: Sphinx, appcfg: SphinxConfig) -> None:
def on_env_get_outdated(
app: Sphinx,
env: BuildEnvironment,
-added: Set[str],
-changed: Set[str],
-removed: Set[str],
-) -> List[str]:
+added: set[str],
+changed: set[str],
+removed: set[str],
+) -> list[str]:
# Remove purged indexes and snippetes from db
for docname in removed:
del cache[(app.config.project, docname)]
13 changes: 6 additions & 7 deletions src/sphinxnotes/snippet/keyword.py
@@ -9,7 +9,6 @@
"""

from __future__ import annotations
-from typing import List, Optional
import string
from collections import Counter

@@ -47,8 +46,8 @@ def __init__(self):
)

def extract(
-self, text: str, top_n: Optional[int] = None, strip_stopwords: bool = True
-) -> List[str]:
+self, text: str, top_n: int | None = None, strip_stopwords: bool = True
+) -> list[str]:
"""Return keywords of given text."""
# TODO: zh -> en
# Normalize
@@ -87,7 +86,7 @@ def normalize(self, text: str) -> str:
text = text.replace('\n', ' ')
return text

-def tokenize(self, text: str) -> List[str]:
+def tokenize(self, text: str) -> list[str]:
# Get top most 5 langs
langs = self._detect_langs(text)[:5]
tokens = [text]
@@ -104,16 +103,16 @@ def tokenize(self, text: str) -> List[str]:
new_tokens = []
return tokens

-def trans_to_pinyin(self, word: str) -> Optional[str]:
+def trans_to_pinyin(self, word: str) -> str | None:
return ' '.join(self._pinyin(word, errors='ignore'))

-def strip_stopwords(self, words: List[str]) -> List[str]:
+def strip_stopwords(self, words: list[str]) -> list[str]:
stw = self._stopwords(['en', 'zh'])
new_words = []
for word in words:
if word not in stw:
new_words.append(word)
return new_words

-def strip_invalid_token(self, tokens: List[str]) -> List[str]:
+def strip_invalid_token(self, tokens: list[str]) -> list[str]:
return [token for token in tokens if token != '']
25 changes: 8 additions & 17 deletions src/sphinxnotes/snippet/snippets.py
@@ -9,7 +9,7 @@
"""

from __future__ import annotations
-from typing import List, Tuple, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING
import itertools
from os import path

@@ -36,16 +36,16 @@ class Snippet(object):

#: Line number range of snippet, in the source file which is left closed
#: and right opened.
-lineno: Tuple[int, int]
+lineno: tuple[int, int]

#: The original reStructuredText of snippet
-rst: List[str]
+rst: list[str]

#: The possible identifier key of snippet, which is picked from nodes'
#: (or nodes' parent's) `ids attr`_.
#:
#: .. _ids attr: https://docutils.sourceforge.io/docs/ref/doctree.html#ids
-refid: Optional[str]
+refid: str | None

def __init__(self, *nodes: nodes.Node) -> None:
assert len(nodes) != 0
@@ -94,11 +94,11 @@ def __init__(self, node: nodes.Node) -> None:
self.text = node.astext()


-class CodeBlock(Text):
+class Code(Text):
#: Language of code block
language: str
#: Caption of code block
-caption: Optional[str]
+caption: str | None

def __init__(self, node: nodes.literal_block) -> None:
assert isinstance(node, nodes.literal_block)
@@ -107,23 +107,14 @@ def __init__(self, node: nodes.literal_block) -> None:
self.caption = node.get('caption')


-class WithCodeBlock(object):
-code_blocks: List[CodeBlock]
-
-def __init__(self, nodes: nodes.Nodes) -> None:
-self.code_blocks = []
-for n in nodes.traverse(nodes.literal_block):
-self.code_blocks.append(self.CodeBlock(n))
-
-
class Title(Text):
def __init__(self, node: nodes.title) -> None:
assert isinstance(node, nodes.title)
super().__init__(node)


class WithTitle(object):
-title: Optional[Title]
+title: Title | None

def __init__(self, node: nodes.Node) -> None:
title_node = node.next_node(nodes.title)
@@ -178,7 +169,7 @@ def _line_of_start(node: nodes.Node) -> int:
return node.line


-def _line_of_end(node: nodes.Node) -> Optional[int]:
+def _line_of_end(node: nodes.Node) -> int | None:
next_node = node.next_node(descend=False, siblings=True, ascend=True)
while next_node:
if next_node.line:
4 changes: 2 additions & 2 deletions src/sphinxnotes/snippet/table.py
@@ -7,7 +7,7 @@
"""

from __future__ import annotations
-from typing import Iterable, Tuple
+from typing import Iterable

from .cache import Index, IndexID
from .utils import ellipsis
@@ -17,7 +17,7 @@
COLUMN_DELIMITER = ' '


-def tablify(indexes: Iterable[Tuple[IndexID, Index]], width: int) -> Iterable[str]:
+def tablify(indexes: Iterable[tuple[IndexID, Index]], width: int) -> Iterable[str]:
"""Create a table from sequence of indices"""

# Calcuate width
5 changes: 2 additions & 3 deletions src/sphinxnotes/snippet/utils/ellipsis.py
@@ -9,12 +9,11 @@
"""

from __future__ import annotations
-from typing import List
from wcwidth import wcswidth


def ellipsis(
-text: str, width: int, ellipsis_sym: str = '..', blank_sym: str = None
+text: str, width: int, ellipsis_sym: str = '..', blank_sym: str | None = None
) -> str:
text_width = wcswidth(text)
if text_width <= width:
@@ -34,7 +33,7 @@


def join(
-lst: List[str],
+lst: list[str],
total_width: int,
title_width: int,
separate_sym: str = '/',
8 changes: 4 additions & 4 deletions src/sphinxnotes/snippet/utils/titlepath.py
@@ -9,7 +9,7 @@
"""

from __future__ import annotations
-from typing import List, TYPE_CHECKING
+from typing import TYPE_CHECKING

from docutils import nodes

@@ -19,11 +19,11 @@

def resolve(
env: BuilderEnviornment, docname: str, node: nodes.Node
-) -> List[nodes.title]:
+) -> list[nodes.title]:
return resolve_section(node) + resolve_document(env, docname)


-def resolve_section(node: nodes.section) -> List[nodes.title]:
+def resolve_section(node: nodes.section) -> list[nodes.title]:
# FIXME: doc is None
titlenodes = []
while node:
@@ -33,7 +33,7 @@ def resolve_section(node: nodes.section) -> List[nodes.title]:
return titlenodes


-def resolve_document(env: BuilderEnviornment, docname: str) -> List[nodes.title]:
+def resolve_document(env: BuilderEnviornment, docname: str) -> list[nodes.title]:
"""
.. note:: Title of document itself does not included in the returned list
"""
