zzzeeksphinx-1.6.0/zzzeeksphinx/__init__.py 0000644 0000000 0000000 00000001260 13615410400 016076 0 ustar 00 
__version__ = "1.6.0"


def setup(app):
    """Sphinx extension entry point for zzzeeksphinx.

    Registers every sub-extension in this package against the given
    :class:`sphinx.application.Sphinx` ``app`` and returns the standard
    extension-metadata dict declaring parallel-safety.
    """
    # local imports so that merely importing the package does not pull in
    # sphinx/docutils machinery
    from . import (
        autodoc_mods,
        dialect_info,
        mako,
        sqlformatter,
        viewsource,
        scss,
        render_pydomains,
        extras,
    )

    # we use jquery. See
    # https://www.sphinx-doc.org/en/master/changes.html#id65
    app.setup_extension("sphinxcontrib.jquery")

    autodoc_mods.setup(app)
    dialect_info.setup(app)
    mako.setup(app)
    sqlformatter.setup(app)
    viewsource.setup(app)
    scss.setup(app)
    render_pydomains.setup(app)
    extras.setup(app)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }


zzzeeksphinx-1.6.0/zzzeeksphinx/autodoc_mods.py 0000644 0000000 0000000 00000040622 13615410400 017024 0 ustar 00 
import inspect
import re

from docutils import nodes
from sphinx import addnodes
from sphinx.util import logging

LOG = logging.getLogger(__name__)


def autodoc_skip_member(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` hook.

    Returns True to force-skip a member, False to force-include it, or the
    incoming ``skip`` decision to leave autodoc's default in place.
    """
    # sphinx is putting blank __init__ methods, not sure why.
    # even if I turn off all the extensions here
    if (
        what == "class"
        and name == "__init__"
        and (not inspect.isfunction(obj) or not obj.__doc__)
    ):
        return True

    # un-skip documented special methods, but not the no-op versions
    # inherited from builtins such as object/type/list/tuple/dict
    if (
        what == "class"
        and skip
        and name
        in ("__init__", "__eq__", "__ne__", "__lt__", "__le__", "__call__")
        and obj.__doc__
        and getattr(obj, "__objclass__", None)
        not in (type, object, list, tuple, dict)
    ):
        return False
    else:
        return skip


def _adjust_rendered_mod_name(config, modname, objname):
    """Translate a real module name into the public name to render.

    Looks up the per-class mapping ``autodocmods_convert_modname_w_class``
    first, then the module-wide ``autodocmods_convert_modname`` mapping,
    falling back to ``modname`` unchanged.
    """
    if (modname, objname) in config.autodocmods_convert_modname_w_class:
        return config.autodocmods_convert_modname_w_class[(modname, objname)]
    elif modname in config.autodocmods_convert_modname:
        return config.autodocmods_convert_modname[modname]
    else:
        return modname


# im sure this is in the app somewhere, but I don't really
# know where, so we're doing it here.
# registry of objects autodoc has processed, keyed by dotted name / refid;
# populated in autodoc_process_docstring, read back in write_autosummaries
_track_autodoced = {}

# dotted names of superclasses/members rendered as "inherited from" text;
# missing_reference() suppresses broken-xref warnings for these
_inherited_names = set()


def _superclass_classstring(
    adjusted_mod, base, tilde=False, pytype="class", attrname=None
):
    """Render a rST cross-reference (or plain literal) for a class or
    one of its attributes.

    Private names and builtins are rendered as ``literal`` text rather
    than links, since there is no documented target for them.
    """
    dont_link = (
        base.__module__ == "builtins"
        or base.__name__.startswith("_")
        or (attrname and attrname.startswith("_"))
    )
    attrname = ".%s" % attrname if attrname else ""
    if dont_link:
        return "``%s.%s%s``" % (adjusted_mod, base.__name__, attrname)
    else:
        return ":%s:`%s%s.%s%s`" % (
            pytype,
            "~" if tilde else "",
            adjusted_mod,
            base.__name__,
            attrname,
        )


def _quick_inspect_sig(
    args,
    varargs=None,
    varkw=None,
    defaults=None,
    # NOTE(review): mutable default args below; harmless here since they are
    # only read, never mutated, and match inspect.getfullargspec's tuple shape
    kwonlyargs=(),
    kwonlydefaults={},
    annotations={},
    formatarg=str,
    formatvarargs=lambda name: "*" + name,
    formatvarkw=lambda name: "**" + name,
):
    """Format an abbreviated signature string from the components of an
    ``inspect.getfullargspec`` result.

    At most four positional arguments are shown before eliding with
    ``", ..."``; arguments that carry defaults are wrapped in square
    brackets, e.g. ``(a, b[, c])``.
    """
    specs = []
    if defaults:
        # index of the first positional argument that has a default
        firstdefault = len(args) - len(defaults)

    close_bracket = False
    for i, arg in enumerate(args):
        if i > 3:
            # truncate long signatures for the summary table
            specs[-1] += ", ..."
            break
        spec = formatarg(arg)
        if defaults and i >= firstdefault and not close_bracket:
            # open the optional-args bracket exactly once
            if specs:
                specs[-1] = specs[-1] + "["
            else:
                spec = "[" + spec
            close_bracket = True
        specs.append(spec)

    if close_bracket:
        specs[-1] = specs[-1] + "]"
        close_bracket = False

    if varargs is not None:
        specs.append(formatvarargs(formatarg(varargs)))
    else:
        # bare "*" separator when keyword-only args exist without *args
        if kwonlyargs:
            specs.append("*")

    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatarg(kwonlyarg)
            if (
                kwonlydefaults
                and kwonlyarg in kwonlydefaults
                and not close_bracket
            ):
                close_bracket = True
                spec = "[" + spec
            specs.append(spec)
    if close_bracket:
        specs[-1] = specs[-1] + "]"
        close_bracket = False
    if varkw is not None:
        specs.append(formatvarkw(formatarg(varkw)))
    result = "(" + ", ".join(specs) + ")"
    return result


def write_autosummaries(app, doctree):
    """``doctree-read`` hook: build a two-column summary table (name /
    description) above each section's immediate autodoc entries.

    For class entries, additionally injects a "Members" box listing the
    class's documented methods and attributes.
    """
    for idx, node in enumerate(doctree.traverse(nodes.section)):
        # only autodoc nodes that are direct children of this section
        immediate_autodoc_nodes = [
            n
            for n in node.traverse(addnodes.desc)
            if n.parent is node
            and n.attributes.get("objtype", None)
            in ("attribute", "data", "class", "function")
        ]
        if not immediate_autodoc_nodes:
            continue

        # remember where the first autodoc node sits so the table can be
        # inserted just above it
        where = node.index(immediate_autodoc_nodes[0])

        immediate_autodoc_nodes = sorted(
            immediate_autodoc_nodes,
            key=lambda node: node[0].attributes["fullname"].lower(),
        )

        table = nodes.table("", classes=["longtable"])
        group = nodes.tgroup("", cols=2)
        table.append(group)
        group.append(nodes.colspec("", colwidth=10))
        group.append(nodes.colspec("", colwidth=90))
        header = nodes.thead("")
        header.append(
            nodes.row(
                "",
                nodes.entry("", nodes.Text("Object Name", "Object Name")),
                nodes.entry("", nodes.Text("Description", "Description")),
            )
        )
        group.append(header)
        body = nodes.tbody("")
        group.append(body)

        for ad_node in immediate_autodoc_nodes:
            sig = ad_node.children[0]
            ids = sig.attributes.get("ids", [None])
            if not ids:
                continue
            refid = ids[0]
            if not refid:
                continue
            row = nodes.row("")
            obj = _track_autodoced.get(refid, None)
            if inspect.isfunction(obj):
                # abbreviated "(a, b[, c])" signature for plain functions
                param_str = _quick_inspect_sig(*inspect.getfullargspec(obj))
            else:
                param_str = ""

            name_node = list(sig.traverse(addnodes.desc_name))
            if name_node:
                name_node = name_node[0]
            else:
                continue

            # deepcopy: the same name node stays in place in the doctree
            name_node = name_node.deepcopy()

            p = nodes.paragraph(
                "",
                "",
                nodes.reference(
                    "",
                    "",
                    name_node,
                    refid=refid,
                    classes=["reference", "internal"],
                ),
                nodes.Text(param_str, param_str),
            )

            row.append(nodes.entry("", p, classes=["autosummary-name"]))
            # description column: first paragraph of the member's content,
            # or empty if there is none
            try:
                para = ad_node[1][0]
                if isinstance(para, nodes.paragraph):
                    text = para.deepcopy()
                else:
                    text = nodes.Text("", "")
            except IndexError:
                text = nodes.Text("", "")

            if ad_node.attributes.get("objtype") == "class":
                # collect references to the class's documented members for
                # the "Members" box
                member_nodes = []
                for attr_desc in ad_node.traverse(addnodes.desc):
                    objtype = attr_desc.attributes.get("objtype")
                    if objtype not in ("classmethod", "method", "attribute"):
                        continue
                    attr_sig = attr_desc.children[0]
                    attr_ids = attr_sig.attributes.get("ids", [None])
                    if not attr_ids:
                        continue
                    attr_ref_id = attr_ids[0]
                    if not attr_ref_id:
                        continue
                    attr_name_node = list(
                        attr_desc.traverse(addnodes.desc_name)
                    )[0]
                    attr_name_node = attr_name_node.deepcopy()
                    if objtype in ("classmethod", "method"):
                        attr_name_node.append(nodes.Text("()"))
                    attr_ref = nodes.reference(
                        "",
                        "",
                        attr_name_node,
                        refid=attr_ref_id,
                        classes=["reference", "internal"],
                    )
                    member_nodes.append(attr_ref)
                if member_nodes:
                    # comma-separated list of member references
                    method_list = nodes.paragraph("", "", member_nodes[0])
                    for ref in member_nodes[1:]:
                        method_list.append(nodes.Text(", "))
                        method_list.append(ref)
                    method_box = nodes.container(
                        "",
                        nodes.paragraph(
                            "", "", nodes.strong("", nodes.Text("Members"))
                        ),
                        method_list,
                        classes=["class-members"],
                    )
                    content = ad_node.traverse(addnodes.desc_content)
                    if content:
                        content = list(content)[0]
                        # insert the box just before the first index/desc
                        # child of the class's content
                        for i, n in enumerate(content.children):
                            if isinstance(
                                n, (addnodes.index, addnodes.desc)
                            ):
                                content.insert(i - 1, method_box)
                                break

            entry = nodes.entry("", text)
            row.append(entry)
            body.append(row)

        if where > 0:
            node.insert(where, table)


def fix_up_autodoc_headers(app, doctree):
    """``doctree-read`` hook: prepend an objtype annotation (``method``,
    ``attribute``, ``function``) and a linked ``Module.Class.`` qualifier
    to autodoc signature headers.
    """
    for idx, node in enumerate(doctree.traverse(addnodes.desc)):
        objtype = node.attributes.get("objtype")
        if objtype in ("method", "attribute"):
            sig = node.children[0]
            modname = sig.attributes["module"]
            clsname = sig.attributes["class"]
            qualified = "%s.%s." % (modname, clsname)
            start_index = 0
            is_classmethod = False
            # skip past an "async " or "classmethod" prefix node if present
            if sig[0].rawsource == "async ":
                start_index = 1
            elif "classmethod" in sig[0].rawsource:
                is_classmethod = True
                start_index = 1
            sig.insert(
                start_index,
                nodes.reference(
                    "",
                    "",
                    nodes.literal(qualified, qualified),
                    refid="%s.%s" % (modname, clsname),
                ),
            )

            # sphinx seems to put the qualifier "classmethod" for
            # classmethods, so don't add our "method" qualifier in that case
            if not is_classmethod:
                sig.insert(
                    start_index,
                    addnodes.desc_annotation(
                        objtype, nodes.Text(objtype + " ", objtype + " ")
                    ),
                )
        elif objtype == "function":
            sig = node.children[0]
            start_index = 0
            if sig[0].rawsource == "async ":
                start_index = 1
            sig.insert(
                start_index,
                addnodes.desc_annotation(
                    objtype, nodes.Text(objtype + " ", objtype + " ")
                ),
            )


def autodoc_process_signature(
    app, what, name, obj, options, signature, return_annotation
):
    """``autodoc-process-signature`` hook.

    A fixer for return annotations that seem to be fully module-qualified
    if the return class is outside of any brackets; rewrites the module
    portion via the ``autodocmods_convert_modname`` config mapping.
    """
    if what in ("function", "method", "attribute") and return_annotation:
        m = re.match(r"^(.*?)\.([\w_]+)$", return_annotation)
        if m:
            modname, objname = m.group(1, 2)
            config = app.env.config
            if modname in config.autodocmods_convert_modname:
                modname = config.autodocmods_convert_modname[modname]
            new_return_annotation = "%s.%s" % (modname, objname)
            return_annotation = new_return_annotation
    return (signature, return_annotation)


def autodoc_process_docstring(app, what, name, obj, options, lines):
    """``autodoc-process-docstring`` hook.

    For classes: records the object in ``_track_autodoced`` and appends a
    "Class signature" block listing translated base classes.  For
    attributes/methods: prepends an "*inherited from*" note when the member
    is actually defined on a (non-``object``) superclass.  For functions:
    just records the object for the autosummary pass.
    """
    # skipping superclass classlevel docs for now, as these
    # get in the way of using autosummary.
    if what in ("class", "exception"):
        _track_autodoced[name] = obj

        # need to translate module names for bases, others
        # as we document lots of symbols in namespace modules
        # outside of their source
        bases = []
        try:
            obj_bases = obj.__bases__
        except AttributeError:
            # NOTE(review): LOG.warn is a deprecated alias of LOG.warning
            LOG.warn(
                "Object %s is not a class, "
                "cannot be corrected by zzzeeksphinx",
                obj,
            )
            return

        for base in obj_bases:
            if base is not object:
                adjusted_mod = _adjust_rendered_mod_name(
                    app.env.config, base.__module__, base.__name__
                )
                bases.append(_superclass_classstring(adjusted_mod, base))
                _inherited_names.add("%s.%s" % (adjusted_mod, base.__name__))

        if bases:
            modname, objname = re.match(r"(.*)\.(.*?)$", name).group(1, 2)

            adjusted_mod = _adjust_rendered_mod_name(
                app.env.config, modname, objname
            )
            clsdoc = _superclass_classstring(adjusted_mod, obj)
            lines.extend(
                [
                    "",
                    ".. container:: class_bases",
                    "    " "",
                    "    **Class signature**",
                    "",
                    "    class %s (%s)" % (clsdoc, ", ".join(bases)),
                    "",
                ]
            )
    elif what in ("attribute", "method"):
        m = re.match(r"(.*?)\.([\w_]+)$", name)
        if m:
            clsname, attrname = m.group(1, 2)
            if clsname in _track_autodoced:
                cls = _track_autodoced[clsname]
                # find the class in the MRO that actually defines the member
                found = False
                for supercls in cls.__mro__:
                    if attrname in supercls.__dict__:
                        found = True
                        break
                if found and supercls is not cls and supercls is not object:
                    adjusted_mod = _adjust_rendered_mod_name(
                        app.env.config,
                        supercls.__module__,
                        supercls.__name__,
                    )

                    _inherited_names.add(
                        "%s.%s" % (adjusted_mod, supercls.__name__)
                    )
                    _inherited_names.add(
                        "%s.%s.%s"
                        % (adjusted_mod, supercls.__name__, attrname)
                    )
                    lines[:0] = [
                        ".. container:: inherited_member",
                        "",
                        "    *inherited from the* "
                        "%s *%s of* %s"
                        % (
                            _superclass_classstring(
                                adjusted_mod,
                                supercls,
                                attrname=attrname,
                                pytype=(
                                    "attr"
                                    if what == "attribute"
                                    else "meth"
                                ),
                                tilde=True,
                            ),
                            what,
                            _superclass_classstring(
                                adjusted_mod, supercls, tilde=True
                            ),
                        ),
                        "",
                    ]
    elif what == "function":
        _track_autodoced[name] = obj


def missing_reference(app, env, node, contnode):
    """``missing-reference`` hook: resolve xrefs we created ourselves for
    inherited names by returning their child node unchanged, suppressing
    the broken-reference warning."""
    if node.attributes["reftarget"] in _inherited_names:
        return node.children[0]
    else:
        return None


def work_around_issue_6785():
    """See https://github.com/sphinx-doc/sphinx/issues/6785"""

    from sphinx.ext import autodoc

    # check some assumptions, more as a way of testing if this code changes
    # on the sphinx side
    assert (
        # our change is already there...as in autobuild
        autodoc.PropertyDocumenter.priority == -100
        or
        # this is how things start out when you first run Sphinx
        autodoc.PropertyDocumenter.priority
        > autodoc.AttributeDocumenter.priority
    ), (
        f"autodoc.PropertyDocumenter.priority = "
        f"{autodoc.PropertyDocumenter.priority}, "
        f"autodoc.AttributeDocumenter.priority="
        f"{autodoc.AttributeDocumenter.priority}"
    )

    autodoc.PropertyDocumenter.priority = -100


def work_around_issue_10351():
    """Disable all ``@overload`` parsing.

    See https://github.com/sphinx-doc/sphinx/issues/10351

    """
    from sphinx.pycode import parser

    def add_overload_entry(self, func):
        pass

    # monkeypatch the picker so overloads are never collected
    parser.VariableCommentPicker.add_overload_entry = add_overload_entry


def setup(app):
    """Register the autodoc customizations with the Sphinx app."""
    work_around_issue_6785()
    work_around_issue_10351()

    app.connect("autodoc-skip-member", autodoc_skip_member)
    app.connect("autodoc-process-docstring", autodoc_process_docstring)
    app.connect("autodoc-process-signature", autodoc_process_signature)
    app.connect("doctree-read", fix_up_autodoc_headers)
    app.connect("doctree-read", write_autosummaries)
    app.add_config_value("autodocmods_convert_modname", {}, "env")
    app.add_config_value("autodocmods_convert_modname_w_class", {}, "env")

    app.connect("missing-reference", missing_reference)
zzzeeksphinx-1.6.0/zzzeeksphinx/dialect_info.py 0000644 0000000 0000000 00000030514 13615410400 016763 0 ustar 00 import re from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst.directives.tables import align from docutils.parsers.rst.directives.tables import ListTable from docutils.statemachine import StringList from sphinx.util.docutils import SphinxDirective # see https://www.sphinx-doc.org/en/master/development/tutorials/todo.html class DialectDirective(SphinxDirective): has_content = True _dialects = {} def _parse_content(self): d = {} d["default"] = self.content[0] d["text"] = [] idx = 0 for line in self.content[1:]: idx += 1 m = re.match(r"\:(.+?)\: +(.+)", line) if m: attrname, value = m.group(1, 2) d[attrname] = value else: break d["text"] = self.content[idx + 1 :] return d def _dbapi_node(self): dialect_name, dbapi_name = self.dialect_name.split("+") try: dialect_directive = self._dialects[dialect_name] except KeyError: raise Exception( "No .. dialect:: %s " "directive has been established" % dialect_name ) output = [] content = self._parse_content() # in sphinx 5.1.1 and earlier, we did this: # old_parent_section_ref = self.state.parent.children[0]["ids"][0] # however in 5.3.0, parent is an empty section. not clear if this is # due to content changes or changes in how automodule works, etc. # so now we manufacture it as follows: parent_section_ref = ( f"module-sqlalchemy.dialects.{dialect_name}.{dbapi_name}" ) # if automodule's naming scheme changes etc., this would silently # break self._append_dbapi_bullet( dialect_name, dbapi_name, content["name"], parent_section_ref ) p = nodes.paragraph( "", "", nodes.Text( "Support for the %s database via the %s driver." % (dialect_directive.database_name, content["name"]), "Support for the %s database via the %s driver." 
% (dialect_directive.database_name, content["name"]), ), ) self.state.nested_parse(content["text"], 0, p) output.append(p) if "url" in content or "driverurl" in content: sec = nodes.section( "", nodes.title("DBAPI", "DBAPI"), ids=["dialect-%s-%s-url" % (dialect_name, dbapi_name)], ) if "url" in content: text = ( "Documentation and download information " "(if applicable) " "for %s is available at:\n" % content["name"] ) uri = content["url"] sec.append( nodes.paragraph( "", "", nodes.Text(text, text), nodes.reference( "", "", nodes.Text(uri, uri), refuri=uri ), ) ) if "driverurl" in content: text = "Drivers for this database are available at:\n" sec.append( nodes.paragraph( "", "", nodes.Text(text, text), nodes.reference( "", "", nodes.Text( content["driverurl"], content["driverurl"] ), refuri=content["driverurl"], ), ) ) output.append(sec) if "connectstring" in content: sec = nodes.section( "", nodes.title("Connecting", "Connecting"), nodes.paragraph( "", "", nodes.Text("Connect String:", "Connect String:"), nodes.literal_block( content["connectstring"], content["connectstring"] ), ), ids=["dialect-%s-%s-connect" % (dialect_name, dbapi_name)], ) output.append(sec) return output def _build_supported_version_table(self, content): if not any( k in content for k in ("full_support", "normal_support", "best_effort") ): return [] text = ["* - Support type", " - Versions"] if "full_support" in content: text.append("* - :term:`Fully tested in CI`") text.append(" - %s" % content["full_support"]) if "normal_support" in content: text.append("* - :term:`Supported version`") text.append(" - %s" % content["normal_support"]) if "best_effort" in content: text.append("* - :term:`Best effort`") text.append(" - %s" % content["best_effort"]) list_table = ListTable( name="list-table", arguments=["**Supported %s versions**" % content["name"]], options={"header-rows": 1}, content=StringList(text), lineno=self.lineno, content_offset=self.content_offset, block_text="", state=self.state, 
state_machine=self.state_machine, ) return list_table.run() def _dialect_node(self): self._dialects[self.dialect_name] = self content = self._parse_content() self.database_name = content["name"] self.bullets = nodes.bullet_list() text = ( "The following dialect/DBAPI options are available. " "Please refer to individual DBAPI sections " "for connect information." ) try: table = self._build_supported_version_table(content) except Exception: table = [] if table: table = [nodes.paragraph("", "", *table)] # add the dialect to the recap table only if the dialect # has information to show there if not hasattr(self.env, "dialect_data"): self.env.dialect_data = [] content["sphinx_docname"] = self.env.docname self.env.dialect_data.append(content) sec = nodes.section( "", nodes.paragraph( "", "", nodes.Text( "Support for the %s database." % content["name"], "Support for the %s database." % content["name"], ), ), nodes.paragraph( "", "", nodes.Text( "The following table summarizes current support " "levels for database release versions.", "The following table summarizes current support " "levels for database release versions.", ), ), *table, nodes.title("DBAPI Support", "DBAPI Support"), nodes.paragraph("", "", nodes.Text(text, text), self.bullets), ids=["dialect-%s" % self.dialect_name], ) return [sec] def _append_dbapi_bullet(self, dialect_name, dbapi_name, name, idname): dialect_directive = self._dialects[dialect_name] try: relative_uri = self.env.app.builder.get_relative_uri( dialect_directive.docname, self.docname ) except: relative_uri = "" list_node = nodes.list_item( "", nodes.paragraph( "", "", nodes.reference( "", "", nodes.Text(name, name), refdocname=self.docname, refuri=relative_uri + "#" + idname, ), ), ) dialect_directive.bullets.append(list_node) def run(self): self.docname = self.env.docname self.dialect_name = dialect_name = self.content[0] has_dbapi = "+" in dialect_name if has_dbapi: return self._dbapi_node() else: return self._dialect_node() class 
dialecttable(nodes.General, nodes.Element): pass class DialectTableDirective(SphinxDirective): has_content = True # from ListTable final_argument_whitespace = True optional_arguments = 1 option_spec = { "header-rows": directives.nonnegative_int, "class": directives.class_option, "name": directives.unchanged, "align": align, "width": directives.length_or_percentage_or_unitless, "widths": directives.value_or( ("auto", "grid"), directives.positive_int_list ), } def run(self): node = dialecttable("") # generate a placeholder table since in process_dialect_table # there seem to be no access to state and state_machine text = [ "* - Database", # " - :term:`Fully tested in CI`", " - :term:`Supported version`", " - :term:`Best effort`", # Mock row. Will be replaced in process_dialect_table "* - **placeholder**", # " - placeholder", " - placeholder", " - placeholder", ] self.options["header-rows"] = 1 list_table = ListTable( name="list-table", arguments=self.arguments, options=self.options, content=StringList(text), lineno=self.lineno, content_offset=self.content_offset, block_text="", state=self.state, state_machine=self.state_machine, ) node.extend(list_table.run()) return [node] def purge_dialects(app, env, docname): if not hasattr(env, "dialect_data"): return # not sure what this does env.dialect_data = [ dialect for dialect in env.dialect_data if dialect["sphinx_docname"] != docname ] def merge_dialects(app, env, docnames, other): if not hasattr(env, "dialect_data"): env.dialect_data = [] if hasattr(other, "dialect_data"): env.dialect_data.extend(other.dialect_data) def process_dialect_table(app, doctree, fromdocname): # Replace all dialecttable nodes with a table with the collected data env = app.builder.env if not hasattr(env, "dialect_data"): env.dialect_data = [] seen = set() dialect_data = [] for d in env.dialect_data: if d["name"] not in seen: seen.add(d["name"]) dialect_data.append(d) dialect_data.sort(key=lambda d: d["name"]) for node in 
doctree.traverse(dialecttable): if not dialect_data: node.replace_self([]) return tbody = list(node.traverse(nodes.tbody)) assert len(tbody) == 1 tbody = tbody[0] assert len(tbody) == 1 templateRow = tbody[0] tbody.remove(templateRow) for dialect_info in dialect_data: row = templateRow.deepcopy() text_to_replace = list(row.traverse(nodes.Text)) assert len(text_to_replace) == 3 columns = [ # TODO: it would be great for this first element to # be hyperlinked dialect_info["name"], # dialect_info.get("full_support", "-"), dialect_info.get("normal_support", "-"), dialect_info.get("best_effort", "-"), ] for text_node, col_text in zip(text_to_replace, columns): text_node.parent.remove(text_node) text_node.parent.append(nodes.Text(col_text, col_text)) tbody.append(row) node.replace_self([node.children[0]]) def setup(app): app.add_node(dialecttable) app.add_directive("dialect", DialectDirective) app.add_directive("dialect-table", DialectTableDirective) app.connect("doctree-resolved", process_dialect_table) app.connect("env-purge-doc", purge_dialects) app.connect("env-merge-info", merge_dialects) zzzeeksphinx-1.6.0/zzzeeksphinx/extras.py 0000644 0000000 0000000 00000004175 13615410400 015655 0 ustar 00 from docutils import nodes from docutils.nodes import Admonition from docutils.nodes import Element from docutils.nodes import topic from docutils.parsers.rst.directives.admonitions import BaseAdmonition from docutils.parsers.rst.directives.body import Topic from sphinx.locale import _ from sphinx.locale import admonitionlabels class footer_topic(topic): pass class deepalchemy(Admonition, Element): pass class DeepAlchemy(BaseAdmonition): required_arguments = 0 node_class = deepalchemy class legacy(Admonition, Element): pass class Legacy(BaseAdmonition): required_arguments = 0 node_class = legacy class FooterTopic(Topic): node_class = footer_topic def visit_footer_topic(self, node): self.visit_topic(node) def depart_footer_topic(self, node): self.depart_topic(node) def 
visit_deepalchemy(self, node): self.visit_admonition(node, "deepalchemy") def visit_legacy(self, node): self.visit_admonition(node, "legacy") def depart_admonition(self, node): self.depart_admonition(node) deepalchemy_visit = (visit_deepalchemy, depart_admonition) legacy_visit = (visit_legacy, depart_admonition) footer_topic_visit = (visit_footer_topic, depart_footer_topic) def move_footer(app, doctree): if doctree.traverse(footer_topic): dec = nodes.decoration() doctree.append(dec) for f1 in doctree.traverse(footer_topic): dec.append(f1.deepcopy()) f1.parent.remove(f1) visit_keys = [ "html", "html5", "latex", "text", "xml", "texinfo", "manpage", ] def setup(app): app.add_directive("footer_topic", FooterTopic) admonitionlabels["deepalchemy"] = _("Deep Alchemy") app.add_directive("deepalchemy", DeepAlchemy) app.add_node(deepalchemy, **{key: deepalchemy_visit for key in visit_keys}) admonitionlabels["legacy"] = _("Legacy Feature") app.add_directive("legacy", Legacy) app.add_node(legacy, **{key: legacy_visit for key in visit_keys}) app.add_node( footer_topic, **{key: footer_topic_visit for key in ["html", "html5"]} ) app.connect("doctree-read", move_footer) zzzeeksphinx-1.6.0/zzzeeksphinx/mako.py 0000644 0000000 0000000 00000010454 13615410400 015273 0 ustar 00 from __future__ import absolute_import import os import re from mako.lookup import TemplateLookup from sphinx.application import TemplateBridge from sphinx.jinja2glue import BuiltinTemplateLoader from .toc import TOCMixin rtd = os.environ.get("READTHEDOCS", None) == "True" class MakoBridge(TOCMixin, TemplateBridge): def init(self, builder, *args, **kw): self.jinja2_fallback = BuiltinTemplateLoader() self.jinja2_fallback.init(builder, *args, **kw) # for gettext builder self.environment = self.jinja2_fallback.environment builder.config.html_context["release_date"] = builder.config[ "release_date" ] protocol_agnostic = builder.config["site_base"] protocol_agnostic = re.sub("^https?://", "//", protocol_agnostic) 
builder.config.html_context["site_base"] = protocol_agnostic self.app = builder.app package_dir = os.path.abspath(os.path.dirname(__file__)) template_path = os.path.join( package_dir, "themes", builder.config.html_theme ) # note: don't use strict_undefined. it means that a variable # cannot even be used conditionally, or with any inheriting template # that attempts to override the %def/%block that would normally # call upon that variable. self.lookup = TemplateLookup( directories=[template_path] + ( [ dir_ for dir_ in builder.theme.get_theme_dirs() if "zzzeeksphinx" in str(dir_) ] if hasattr(builder, "theme") else [] ), # format_exceptions=True, imports=["from zzzeeksphinx import util"], ) if rtd and builder.config["site_base"]: import requests if builder.config["site_adapter_template"]: # remote site layout / startup files template_name = builder.config["site_adapter_template"] template = requests.get( builder.config["site_base"] + "/" + template_name ).content self.lookup.put_string(template_name, template) py_name = builder.config["site_adapter_py"] if py_name: setup_ctx = requests.get( builder.config["site_base"] + "/" + py_name ).content lcls = {} exec(setup_ctx, lcls) self.setup_ctx = lcls["setup_context"] def setup_ctx(self, context): pass def render(self, template, context): template = template.replace(".html", ".mako") context["prevtopic"] = context.pop("prev", None) context["nexttopic"] = context.pop("next", None) context["app"] = self.app # local docs layout context["rtd"] = False context["toolbar"] = False context["base"] = "static_base.mako" context["parent_toc"] = self.get_current_subtoc context["local_toc"] = self.get_local_toc context["bridge"] = self context.setdefault("toc", None) context.setdefault("pdf_url", None) context.setdefault("metatags", None) context.setdefault("canonical_url", None) context.setdefault("single_version", None) context.setdefault("rtd_language", "en") context.setdefault("is_prerelease_version", False) 
context.setdefault("is_legacy_version", False) context.setdefault("is_current_version", False) # override context attributes self.setup_ctx(context) context.setdefault("_", lambda x: x) return self.lookup.get_template(template).render_unicode(**context) def render_string(self, template, context): # this is used for .js, .css etc. and we don't have # local copies of that stuff here so use the jinja render. return self.jinja2_fallback.render_string(template, context) def setup(app): app.config["template_bridge"] = "zzzeeksphinx.mako.MakoBridge" app.add_config_value("release_date", "", "env") app.add_config_value("site_base", "", "env") app.add_config_value("site_adapter_template", "", "env") app.add_config_value("site_adapter_py", "", "env") app.add_config_value("build_number", "", "env") zzzeeksphinx-1.6.0/zzzeeksphinx/render_pydomains.py 0000644 0000000 0000000 00000005146 13615410400 017710 0 ustar 00 import re from docutils import nodes from sphinx.addnodes import pending_xref from sphinx.util import logging LOG = logging.getLogger(__name__) def replace_synonyms(app, doctree): py_nodes = doctree.traverse(pending_xref) replace_prefixes = app.env.config.zzzeeksphinx_module_prefixes for py_node in py_nodes: if not py_node.children or not py_node.children[0].children: continue reftype = py_node.attributes["reftype"] reftarget = py_node.attributes["reftarget"] needs_correction = False ref_tokens = reftarget.split(".") if ref_tokens[0] in replace_prefixes: ref_tokens[0] = replace_prefixes[ref_tokens[0]] needs_correction = True py_node.attributes["reftarget"] = ".".join(ref_tokens) if reftype in ("meth", "attr", "paramref"): lt = len(ref_tokens) if ( reftype == "paramref" and lt >= 3 and ref_tokens[-3][0].isupper() ): # for paramref look at first char of "method" token # to see if its a method name or if this is a # function. paramrefs don't store this info right now. 
need = 3 else: need = min(lt, 2) corrected_name = ".".join(ref_tokens[-need:]) elif reftype in ("func", "obj", "data", "mod"): if needs_correction or re.match( r"^:(?:func|obj|data|mod):`[\.~].+`$", py_node.rawsource ): corrected_name = ref_tokens[-1] else: # print( # "no longer correcting: %s %s %s" # % (py_node.rawsource, py_node.source, py_node.line) # ) continue elif reftype == "class" and ( needs_correction or re.match(r"^:class:`\..+`$", py_node.rawsource) ): corrected_name = ref_tokens[-1] else: if needs_correction: LOG.warn( "source %r at %s needs synonym correction but is not " "handled by zzzeeksphinx", py_node.rawsource, py_node.source, ) continue if reftype in ("meth", "func"): corrected_name += "()" py_node.children[0].pop(0) py_node.children[0].insert( 0, nodes.Text(corrected_name, corrected_name) ) def setup(app): app.connect("doctree-read", replace_synonyms) app.add_config_value("zzzeeksphinx_module_prefixes", {}, "env") zzzeeksphinx-1.6.0/zzzeeksphinx/scss.py 0000644 0000000 0000000 00000004031 13615410400 015311 0 ustar 00 from __future__ import absolute_import import os import sass # these docs aren't super accurate # http://pyscss.readthedocs.org/en/latest/ def _check_for_builder(app): # TODO: make this inclusive of HTML builders # instead, or something if app.builder.name == "latex" or app.builder.name == "gettext": return False else: return True def add_stylesheet(app): if not _check_for_builder(app): return to_gen = [] package_dir = os.path.abspath(os.path.dirname(__file__)) static_path = os.path.join( package_dir, "themes", app.builder.config.html_theme, "static" ) for fname in os.listdir(static_path): name, ext = os.path.splitext(fname) if ext == ".scss": to_gen.append((static_path, name)) elif ext == ".css": if hasattr(app, "add_css_file"): app.add_css_file(fname) else: app.add_stylesheet(fname) # sphinx doesn't really have a "temp" area that will persist # down into build-finished (env.temp_data gets emptied). # So make our own! 
app._builder_scss = to_gen for path, name in to_gen: # changed in 1.8 # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_css_file app.add_css_file("%s.css" % name) def generate_stylesheet(app, exception): # TODO: make this inclusive of HTML builders # instead, or something if not _check_for_builder(app): return to_gen = app._builder_scss if exception: return for static_path, name in to_gen: css = sass.compile( string=open(os.path.join(static_path, "%s.scss" % name)).read() ) dest = os.path.join(app.builder.outdir, "_static", "%s.css" % name) # copyfile(os.path.join(source, "%s.css" % name), dest) with open(dest, "w") as out: out.write(css) def setup(app): app.connect("builder-inited", add_stylesheet) app.connect("build-finished", generate_stylesheet) zzzeeksphinx-1.6.0/zzzeeksphinx/sqlformatter.py 0000644 0000000 0000000 00000024437 13615410400 017075 0 ustar 00 from __future__ import absolute_import import re import pygments from pygments.filter import apply_filters from pygments.filter import Filter from pygments.formatters import HtmlFormatter from pygments.formatters import LatexFormatter from pygments.lexer import bygroups from pygments.lexer import RegexLexer from pygments.lexer import using from pygments.lexer import words from pygments.lexers import PythonConsoleLexer from pygments.lexers import PythonLexer from pygments.lexers import SqlLexer from pygments.token import Keyword from pygments.token import Token from sphinx import highlighting from sphinx.highlighting import PygmentsBridge def _strip_trailing_whitespace(iter_): buf = list(iter_) if buf: buf[-1] = (buf[-1][0], buf[-1][1].rstrip()) for t, v in buf: yield t, v class RealWorldSQLLexer(SqlLexer): tokens = {k: l[:] for (k, l) in SqlLexer.tokens.items()} tokens["root"].insert(0, (words(("RETURNING",), suffix=r"\b"), Keyword)) class StripDocTestFilter(Filter): def filter(self, lexer, stream): for ttype, value in stream: if ( ttype is Token.Comment or ttype.parent 
is Token.Comment ) and re.match(r"#\s*doctest:", value): continue yield ttype, value class DetectAnnotationsFilter(Filter): def filter(self, lexer, stream): first, second = None, None found_colon = False should_report = False annotated = None found_sql = False for ttype, value in stream: # any encounting of SQL blocks, stop immediately. This is # likely not a class def example and we don't want the # "anno/non-anno" label to appear under SQL boxes at all if ttype is Token.Name and value in ( "execsql", "printsql", "opensql", "sqlpopup", ): found_sql = True should_report = False if found_sql: yield ttype, value continue if ttype is Token.Name.Builtin: ttype = Token.Name if ttype is Token.Keyword and value == "class": should_report = True first = second second = ttype, value yield ttype, value if annotated: continue elif annotated is None and ttype is not Token.Text: annotated = False if (first, second) == ARROW_ANNOTATION: annotated = True elif found_colon: if (ttype, value) == NEWLINE: found_colon = False elif ttype == Token.Name: found_colon = False annotated = True elif first and ((first[0:1], second) == COLON_ANNOTATION): found_colon = True # should_report = True # report only on examples that have class defs if annotated is not None and should_report: yield Token.Other, f"pep484 annotations detected: {annotated}" class PyConWithSQLLexer(RegexLexer): name = "PyCon+SQL" aliases = ["pycon+sql"] flags = re.IGNORECASE | re.DOTALL tokens = { "root": [ (r"{sql}", Token.Sql.Link, "sqlpopup"), (r"{execsql}", Token.Sql.Exec, "execsql"), (r"{opensql}", Token.Sql.Exec, "opensql"), # alias of execsql (r"{printsql}", Token.Sql.Print, "printsql"), (r".*?\n", using(PythonConsoleLexer)), ], "sqlpopup": [ ( r"(.*?\n)((?:PRAGMA|BEGIN|WITH|SE\.\.\.|SELECT|INSERT|" "DELETE|ROLLBACK|" "COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA" "|DESCRIBE).*?(?:{stop}\n?|$))", bygroups(using(PythonConsoleLexer), Token.Sql.Popup), "#pop", ) ], "execsql": [(r".*?(?:{stop}\n*|$)", Token.Sql.ExecState, 
"#pop")], "opensql": [(r".*?(?:{stop}\n*|$)", Token.Sql.ExecState, "#pop")], "printsql": [(r".*?(?:{stop}\n*|$)", Token.Sql.PrintState, "#pop")], } class PythonWithSQLLexer(RegexLexer): name = "Python+SQL" aliases = ["python+sql"] flags = re.IGNORECASE | re.DOTALL tokens = { "root": [ (r"{sql}", Token.Sql.Link, "sqlpopup"), (r"{execsql}", Token.Sql.Exec, "execsql"), (r"{opensql}", Token.Sql.Exec, "opensql"), # alias of execsql (r"{printsql}", Token.Sql.Print, "printsql"), (r".*?\n", using(PythonLexer)), ], "sqlpopup": [ ( r"(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK" "|COMMIT|ALTER|UPDATE|CREATE|DROP" "|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))", bygroups(using(PythonLexer), Token.Sql.Popup), "#pop", ) ], "execsql": [(r".*?(?:{stop}\n*|$)", Token.Sql.ExecState, "#pop")], "opensql": [(r".*?(?:{stop}\n*|$)", Token.Sql.ExecState, "#pop")], "printsql": [(r".*?(?:{stop}\n*|$)", Token.Sql.PrintState, "#pop")], } class PopupSQLFormatter(HtmlFormatter): def _format_lines(self, tokensource): sql_lexer = RealWorldSQLLexer() formatter = HtmlFormatter(nowrap=True) buf = [] for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]): if ttype in Token.Sql: for t, v in HtmlFormatter._format_lines(self, iter(buf)): yield t, v buf = [] if ttype in (Token.Sql.ExecState, Token.Sql.PrintState): class_ = ( "show_sql" if ttype is Token.Sql.ExecState else "show_sql_print" ) yield ( 1, f"
") lines[0:1] = [before + "", after] # nothing to do for the last line; it always starts withanyway # now that we have code lines (starting at index 1), insert anchors for # the collected tags (HACK: this only works if the tag boundaries are # properly nested!) maxindex = len(lines) - 1 for name, docname in used.items(): type_, start, end = tags[name] backlink = urito(pagename, docname) + "#" + refname + "." + name lines[start] = ( '%s' % (name, backlink, _("[docs]")) + lines[start] ) lines[min(end, maxindex)] += "" # try to find parents (for submodules) parents = [] parent = modname while "." in parent: parent = parent.rsplit(".", 1)[0] if parent in modnames: parents.append( { "link": urito( pagename, "_modules/" + parent.replace(".", "/") ), "title": parent, } ) parents.append( { "link": urito(pagename, "_modules/index"), "title": _("Module code"), } ) parents.reverse() # putting it all together context = { "parents": parents, "title": modname, "body": ( _("Source code for %s
") % modname + "\n".join(lines) ), } yield (pagename, context, "page.html") if not modnames: return html = ["\n"] # the stack logic is needed for using nested lists for submodules stack = [""] for modname in sorted(modnames): if modname.startswith(stack[-1]): stack.append(modname + ".") html.append("
|
|
% for i, (key, dummy) in enumerate(genindexentries): ${i > 0 and '| ' or ''} ${key} % endfor
${_('Full index on one page')}
% endif ${parent.sidebarrel()} %def> zzzeeksphinx-1.6.0/zzzeeksphinx/themes/zsbase/layout.mako 0000644 0000000 0000000 00000024104 13615410400 020731 0 ustar 00 ## coding: utf-8 <%! import os import time from datetime import datetime, timezone if "SOURCE_DATE_EPOCH" in os.environ: generated_at = datetime.fromtimestamp( timestamp=float(os.environ['SOURCE_DATE_EPOCH']), tz=timezone.utc ) else: generated_at = datetime.fromtimestamp( timestamp=time.time(), ).astimezone() local_script_files = [] default_css_files = [ '_static/pygments.css', ] %> <%doc> Structural elements are all prefixed with "docs-" to prevent conflicts when the structure is integrated into the main site. docs-container -> docs-top-navigation-container -> docs-header -> docs-version-header docs-top-navigation docs-top-page-control docs-navigation-banner docs-body-container -> docs-sidebar docs-body docs-bottom-navigation docs-copyright %doc> <%inherit file="${context['base']}"/> <% if builder == 'epub': next.body() return %> <% withsidebar = bool(toc) and ( theme_index_sidebar is True or current_page_name != 'index' ) %> <%block name="head_title"> % if theme_index_sidebar or current_page_name != 'index': ${capture(self.show_title) | util.striptags} — % endif ${docstitle|h} %block> <%def name="show_title()"> ${title} %def>Can't find the page you're looking for.
<%block name="footer"> ${parent.footer()} %block> zzzeeksphinx-1.6.0/zzzeeksphinx/themes/zsbase/page.mako 0000644 0000000 0000000 00000000103 13615410400 020321 0 ustar 00 <%inherit file="layout.mako"/> ${body| util.strip_toplevel_anchors} zzzeeksphinx-1.6.0/zzzeeksphinx/themes/zsbase/search.mako 0000644 0000000 0000000 00000000621 13615410400 020657 0 ustar 00 <%inherit file="layout.mako"/> <%! local_script_files = ['_static/searchtools.js', '_static/language_data.js'] %> <%block name="show_title"> ${_('Search')} %block> <%block name="footer"> ${parent.footer()} %block> <%block name="lower_scripts"> ${parent.lower_scripts()} %block> zzzeeksphinx-1.6.0/zzzeeksphinx/themes/zsbase/static_base.mako 0000644 0000000 0000000 00000001737 13615410400 021704 0 ustar 00 <%def name="bannerad()">%def> ${metatags and metatags or ''}