Skip to content

Instantly share code, notes, and snippets.

@aucampia
Last active August 23, 2022 21:23
Show Gist options
  • Save aucampia/814546c08c118960740a7f91c6a9905d to your computer and use it in GitHub Desktop.
task run -- python -m pip install --upgrade strip-hints black python-minifier
PYLOGGING_LEVEL=INFO task run -- git difftool -y -x $(readlink -f devtools/diffrtpy.py) upstream/master..origin/iwana-20220823T2105-typing_query_etc | tee /var/tmp/compact.diff
gh gist edit https://gist.github.com/aucampia/814546c08c118960740a7f91c6a9905d -f compact.diff /var/tmp/compact.diff
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -168,13 +168,16 @@
- Added type hints.
- `rdflib.store` and builtin stores have mostly complete type hints.
[PR #2057](https://github.com/RDFLib/rdflib/pull/2057).
- `rdflib.graph` have mostly complete type hints.
[PR #2080](https://github.com/RDFLib/rdflib/pull/2080).
- - `rdflib.plugins.sparql.algebra` amd `rdflib.plugins.sparql.operators` have
+ - `rdflib.plugins.sparql.algebra` and `rdflib.plugins.sparql.operators` have
mostly complete type hints.
[PR #2094](https://github.com/RDFLib/rdflib/pull/2094).
+ - `rdflib.query` and `rdflib.plugins.sparql.results.*` have mostly complete
+ type hints.
+ [PR #2097](https://github.com/RDFLib/rdflib/pull/2097).
<!-- -->
<!-- -->
<!-- CHANGE BARRIER: END PR #2057 -->
--- a/rdflib/plugins/sparql/results/csvresults.py
+++ b/rdflib/plugins/sparql/results/csvresults.py
@@ -1,10 +1,12 @@
+from __future__ import annotations
import codecs
import csv
-from typing import IO
-from rdflib import BNode, Literal, URIRef, Variable
+from typing import IO, Dict, List, Optional, Union
+from rdflib.plugins.sparql.processor import SPARQLResult
from rdflib.query import Result, ResultParser, ResultSerializer
+from rdflib.term import BNode, Identifier, Literal, URIRef, Variable
class CSVResultParser(ResultParser):
def __init__(self):
self.delim = ","
--- a/rdflib/plugins/sparql/results/graph.py
+++ b/rdflib/plugins/sparql/results/graph.py
@@ -1,6 +1,8 @@
-from rdflib import Graph
+from __future__ import annotations
+from typing import IO, Optional
+from rdflib.graph import Graph
from rdflib.query import Result, ResultParser
class GraphResultParser(ResultParser):
def parse(self, source, content_type):
--- a/rdflib/plugins/sparql/results/jsonresults.py
+++ b/rdflib/plugins/sparql/results/jsonresults.py
@@ -1,9 +1,10 @@
+from __future__ import annotations
import json
-from typing import IO, Any, Dict
-from rdflib import BNode, Literal, URIRef, Variable
+from typing import IO, Any, Dict, Mapping, MutableSequence, Optional
from rdflib.query import Result, ResultException, ResultParser, ResultSerializer
+from rdflib.term import BNode, Identifier, Literal, URIRef, Variable
class JSONResultParser(ResultParser):
def parse(self, source, content_type=None):
inp = source.read()
--- a/rdflib/plugins/sparql/results/rdfresults.py
+++ b/rdflib/plugins/sparql/results/rdfresults.py
@@ -1,7 +1,10 @@
-from rdflib import RDF, Graph, Namespace, Variable
+from typing import IO, Any, MutableMapping, Optional, Union
+from rdflib.graph import Graph
+from rdflib.namespace import RDF, Namespace
from rdflib.query import Result, ResultParser
+from rdflib.term import Node, Variable
RS = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/result-set#")
class RDFResultParser(ResultParser):
--- a/rdflib/plugins/sparql/results/tsvresults.py
+++ b/rdflib/plugins/sparql/results/tsvresults.py
@@ -1,16 +1,17 @@
import codecs
+import typing
+from typing import IO, Union
from pyparsing import (
FollowedBy,
LineEnd,
Literal,
Optional,
ParserElement,
Suppress,
ZeroOrMore,
)
-from rdflib import Literal as RDFLiteral
from rdflib.plugins.sparql.parser import (
BLANK_NODE_LABEL,
IRIREF,
LANGTAG,
STRING_LITERAL1,
@@ -19,10 +20,13 @@
NumericLiteral,
Var,
)
from rdflib.plugins.sparql.parserutils import Comp, CompValue, Param
from rdflib.query import Result, ResultParser
+from rdflib.term import BNode
+from rdflib.term import Literal as RDFLiteral
+from rdflib.term import URIRef
ParserElement.setDefaultWhitespaceChars(" \n")
String = STRING_LITERAL1 | STRING_LITERAL2
RDFLITERAL = Comp(
"literal",
--- a/rdflib/plugins/sparql/results/txtresults.py
+++ b/rdflib/plugins/sparql/results/txtresults.py
@@ -1,10 +1,9 @@
-from typing import IO, List, Optional
-from rdflib import BNode, Literal, URIRef
+from typing import IO, List, Optional, Union
from rdflib.namespace import NamespaceManager
from rdflib.query import ResultSerializer
-from rdflib.term import Variable
+from rdflib.term import BNode, Literal, URIRef, Variable
def _termString(t, namespace_manager):
if t is None:
return "-"
--- a/rdflib/plugins/sparql/results/xmlresults.py
+++ b/rdflib/plugins/sparql/results/xmlresults.py
@@ -15,13 +15,12 @@
cast,
)
from xml.dom import XML_NAMESPACE
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesNSImpl
-from rdflib import BNode, Literal, URIRef, Variable
from rdflib.query import Result, ResultException, ResultParser, ResultSerializer
-from rdflib.term import Identifier
+from rdflib.term import BNode, Identifier, Literal, URIRef, Variable
try:
import lxml.etree as lxml_etree
FOUND_LXML = True
--- a/rdflib/query.py
+++ b/rdflib/query.py
@@ -1,10 +1,26 @@
+from __future__ import annotations
import itertools
import types
import warnings
from io import BytesIO
-from typing import IO, TYPE_CHECKING, List, Optional, Union, cast
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ BinaryIO,
+ Dict,
+ Iterator,
+ List,
+ Mapping,
+ MutableSequence,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+ overload,
+)
from urllib.parse import urlparse
from urllib.request import url2pathname
__all__ = [
"Processor",
@@ -15,12 +31,13 @@
"ResultSerializer",
"ResultException",
"EncodeOnlyUnicode",
]
if TYPE_CHECKING:
- from rdflib.graph import Graph
- from rdflib.term import Variable
+ from rdflib.graph import Graph, _TripleType
+ from rdflib.plugins.sparql.sparql import Query, Update
+ from rdflib.term import Identifier, Variable
class Processor(object):
def __init__(self, graph):
pass
@@ -53,11 +70,11 @@
def __getattr__(self, name):
return getattr(self.__stream, name)
-class ResultRow(tuple):
+class ResultRow(Tuple[("Identifier", ...)]):
def __new__(cls, values, labels):
instance = super(ResultRow, cls).__new__(cls, (values.get(v) for v in labels))
instance.labels = dict(((str(x[1]), x[0]) for x in enumerate(labels)))
return instance
@@ -73,10 +90,18 @@
if name in self.labels:
return tuple.__getitem__(self, self.labels[name])
if str(name) in self.labels:
return tuple.__getitem__(self, self.labels[str(name)])
raise KeyError(name)
+
+ @overload
+ def get(self, name, default):
+ ...
+
+ @overload
+ def get(self, name, default=...):
+ ...
def get(self, name, default=None):
try:
return self[name]
except KeyError:
--- a/test/test_misc/test_plugins.py
+++ b/test/test_misc/test_plugins.py
@@ -4,16 +4,17 @@
import subprocess
import sys
import warnings
from contextlib import ExitStack, contextmanager
from pathlib import Path
-from typing import Any, Callable, Dict, Generator, List
+from typing import Any, Callable, Dict, Generator, List, cast
import rdflib.plugin
import rdflib.plugins.sparql
import rdflib.plugins.sparql.evaluate
from rdflib import Graph
from rdflib.parser import Parser
+from rdflib.query import ResultRow
TEST_DIR = Path(__file__).parent.parent
TEST_PLUGINS_DIR = TEST_DIR / "plugins"
@@ -80,11 +81,11 @@
"SELECT ?output1 WHERE { BIND(<" + ep_ns + "function>() AS ?output1) }"
)
logging.debug("query_string = %s", query_string)
result = graph.query(query_string)
assert result.type == "SELECT"
- rows = list(result)
+ rows = cast(List[ResultRow], list(result))
logging.debug("rows = %s", rows)
assert len(rows) == 1
assert len(rows[0]) == 1
assert rows[0][0] == plugin_module.function_result
assert [str(msg) for msg in warnings_record] == []
--- a/test/test_sparql/test_forward_slash_escapes.py
+++ b/test/test_sparql/test_forward_slash_escapes.py
@@ -4,10 +4,11 @@
from typing import Set
import pytest
from rdflib import Graph
from rdflib.plugins.sparql.processor import prepareQuery
from rdflib.plugins.sparql.sparql import Query
+from rdflib.query import ResultRow
query_string_expanded = "\nSELECT ?nIndividual\nWHERE {\n ?nIndividual a <http://example.org/ontology/core/MyClassB> .\n}"
query_string_prefixed = "\nPREFIX ex: <http://example.org/ontology/>\nSELECT ?nIndividual\nWHERE {\n # NOTE: Syntax is incorrect - forward slash cannot be included in\n # local component of name.\n ?nIndividual a ex:core\\/MyClassB .\n}"
PN_LOCAL_BACKSLASH_XFAIL_REASON = "\n Contrary to the ratified SPARQL 1.1 grammar, the RDFlib SPARQL propcessor\n accepts backslashes as part of PN_LOCAL which it treats as escape\n characters.\n\n There should be a way to instruct the SPARQL parser to operate in strict\n mode, and in strict mode backslashes should not be permitted in PN_LOCAL.\n\n See https://github.com/RDFLib/rdflib/issues/1871\n"
@@ -41,10 +42,11 @@
query_compiled = True
except Exception:
pass
assert expected_query_compiled == query_compiled
for result in graph.query(query_object):
+ assert isinstance(result, ResultRow)
computed.add(str(result[0]))
assert expected == computed
def test_escapes_and_query_turtle_expanded():
--- a/test/test_sparql/test_sparql.py
+++ b/test/test_sparql/test_sparql.py
@@ -14,11 +14,11 @@
from rdflib.plugins.sparql.evaluate import evalPart
from rdflib.plugins.sparql.evalutils import _eval
from rdflib.plugins.sparql.parser import parseQuery
from rdflib.plugins.sparql.parserutils import prettify_parsetree
from rdflib.plugins.sparql.sparql import SPARQLError
-from rdflib.query import Result
+from rdflib.query import Result, ResultRow
from rdflib.term import Identifier, Variable
def test_graph_prefix():
g1 = Graph()
@@ -208,10 +208,11 @@
query_string = '\n SELECT ?output0 WHERE {\n BIND(CONCAT("a", " + ", "b") AS ?output0)\n }\n '
result = graph.query(query_string)
assert result.type == "SELECT"
rows = list(result)
assert len(rows) == 1
+ assert isinstance(rows[0], ResultRow)
assert len(rows[0]) == 1
assert rows[0][0] == Literal("a + b")
def test_custom_eval():
@@ -247,10 +248,11 @@
query_string = '\n PREFIX eg: <http://example.com/>\n SELECT ?output0 ?output1 WHERE {\n BIND(CONCAT("a", " + ", "b") AS ?output0)\n BIND(eg:function() AS ?output1)\n }\n '
result = graph.query(query_string)
assert result.type == "SELECT"
rows = list(result)
assert len(rows) == 1
+ assert isinstance(rows[0], ResultRow)
assert len(rows[0]) == 2
assert rows[0][0] == Literal("a + b")
assert rows[0][1] == custom_function_result
finally:
rdflib.plugins.sparql.CUSTOM_EVALS["test_function"]
--- a/test/test_sparql/test_tsvresults.py
+++ b/test/test_sparql/test_tsvresults.py
@@ -1,11 +1,13 @@
from io import StringIO
from rdflib.plugins.sparql.results.tsvresults import TSVResultParser
+from rdflib.query import ResultRow
def test_empty_tsvresults_bindings():
source = "?s\t?p\t?o\n \t<urn:p>\t<urn:o>\n <urn:s>\t\t<urn:o>\n <urn:s>\t<urn:p>\t"
parser = TSVResultParser()
source_io = StringIO(source)
result = parser.parse(source_io)
for (idx, row) in enumerate(result):
+ assert isinstance(row, ResultRow)
assert row[idx] is None
--- a/test/test_typing.py
+++ b/test/test_typing.py
@@ -1,10 +1,11 @@
#!/usr/bin/env python3
from typing import Set, Tuple
import rdflib
import rdflib.plugins.sparql.processor
-from rdflib.term import Node
+from rdflib.query import ResultRow
+from rdflib.term import IdentifiedNode, Identifier, Node
def test_rdflib_query_exercise():
graph = rdflib.Graph()
literal_one = rdflib.Literal("1", datatype=rdflib.XSD.integer)
@@ -30,30 +31,34 @@
assert expected_nodes_using_predicate_q == computed_nodes_using_predicate_q
one_usage_query = "SELECT ?resource\nWHERE {\n ?resource <http://example.org/predicates#p> 1 .\n}\n"
expected_one_usage = {kb_bnode, kb_http_uriref, kb_https_uriref, kb_urn_uriref}
computed_one_usage = set()
for one_usage_result in graph.query(one_usage_query):
+ assert isinstance(one_usage_result, ResultRow)
computed_one_usage.add(one_usage_result[0])
assert expected_one_usage == computed_one_usage
two_usage_query = 'SELECT ?resource ?predicate\nWHERE {\n ?resource ?predicate "2"^^xsd:integer .\n}\n'
expected_two_usage = {
(kb_https_uriref, predicate_p),
(kb_https_uriref, predicate_q),
}
computed_two_usage = set()
for two_usage_result in graph.query(two_usage_query):
+ assert isinstance(two_usage_result, ResultRow)
computed_two_usage.add(two_usage_result)
assert expected_two_usage == computed_two_usage
nsdict = {k: v for (k, v) in graph.namespace_manager.namespaces()}
prepared_one_usage_query = rdflib.plugins.sparql.processor.prepareQuery(
one_usage_query, initNs=nsdict
)
computed_one_usage_from_prepared_query = set()
for prepared_one_usage_result in graph.query(prepared_one_usage_query):
+ assert isinstance(prepared_one_usage_result, ResultRow)
computed_one_usage_from_prepared_query.add(prepared_one_usage_result[0])
assert expected_one_usage == computed_one_usage_from_prepared_query
for node_using_one in sorted(computed_one_usage):
+ assert isinstance(node_using_one, IdentifiedNode)
graph.add((node_using_one, predicate_r, literal_true))
python_one = literal_one.toPython()
assert python_one == 1
python_two = literal_two.toPython()
assert python_two == 2
--- a/test/utils/sparql_checker.py
+++ b/test/utils/sparql_checker.py
@@ -358,10 +358,12 @@
expected_result.bindings, result.bindings, skip_duplicates=lax_cardinality
)
elif result.type == ResultType.ASK:
assert expected_result.askAnswer == result.askAnswer
else:
+ assert expected_result.graph is not None
+ assert result.graph is not None
logging.debug(
"expected_result.graph = %s, result.graph = %s\n%s",
expected_result.graph,
result.graph,
result.graph.serialize(format=expected_result_format),
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment