From eb3f024f1aa90bc8ad5630fa8f7c8fa33f1b5121 Mon Sep 17 00:00:00 2001
From: James Knight
Date: Sun, 3 Sep 2023 09:46:28 -0400
Subject: [PATCH 1/4] rework build caching

The following provides a series of corrections and improvements towards
how this extension tracks outdated pages and manages/tracks publication
information, helping manage cleanup processing when only a subset of
pages is updated.

Part of the problem with this extension's original implementation is the
lack of consideration for Sphinx's documentation cache. In most use
cases, the `-E` argument was used/promoted to start with a clean
environment to ensure all documents are processed and published. Users
who did not force a clean environment would see unexpected results,
mainly when cleanup options were enabled. Specifically, after publishing
documentation, a re-publish of new documentation with a subset of
changes could cause some valid-but-not-updated pages to be removed.

This commit should bring proper cache management to handle rebuild and
republish events. We now track a hash of the Confluence-specific
configuration to help immediately flag pages as outdated when these
entries are updated. We also now keep track of the last page publish
identifiers from a previous publication event. This information can be
used to unflag pages that are believed to be legacy pages, ensuring
up-to-date pages remain on the target Confluence instance.

Signed-off-by: James Knight
---
 sphinxcontrib/confluencebuilder/builder.py    |  53 ++--
 sphinxcontrib/confluencebuilder/config/env.py |  40 +++
 sphinxcontrib/confluencebuilder/env.py        | 231 ++++++++++++++++++
 3 files changed, 304 insertions(+), 20 deletions(-)
 create mode 100644 sphinxcontrib/confluencebuilder/env.py

diff --git a/sphinxcontrib/confluencebuilder/builder.py b/sphinxcontrib/confluencebuilder/builder.py
index 02b82961..79f07e98 100644
--- a/sphinxcontrib/confluencebuilder/builder.py
+++ b/sphinxcontrib/confluencebuilder/builder.py
@@ -19,6 +19,8 @@
 from sphinxcontrib.confluencebuilder.config.checks import validate_configuration
 from sphinxcontrib.confluencebuilder.config.defaults import apply_defaults
 from sphinxcontrib.confluencebuilder.config.env import apply_env_overrides
+from sphinxcontrib.confluencebuilder.config.env import build_hash
+from sphinxcontrib.confluencebuilder.env import ConfluenceCacheInfo
 from sphinxcontrib.confluencebuilder.intersphinx import build_intersphinx
 from sphinxcontrib.confluencebuilder.logger import ConfluenceLogger
 from sphinxcontrib.confluencebuilder.nodes import confluence_footer
@@ -67,12 +69,15 @@ def __init__(self, app, env=None):
         self.domain_indices = {}
         self.file_suffix = '.conf'
         self.info = ConfluenceLogger.info
+        self.legacy_assets = {}
+        self.legacy_pages = None
         self.link_suffix = None
         self.metadata = defaultdict(dict)
         self.nav_next = {}
         self.nav_prev = {}
         self.omitted_docnames = []
         self.orphan_docnames = []
+        self.parent_id = None
         self.publish_allowlist = None
         self.publish_denylist = None
         self.publish_docnames = []
@@ -84,8 +89,10 @@ def __init__(self, app, env=None):
         self.use_search = None
         self.verbose = ConfluenceLogger.verbose
         self.warn = ConfluenceLogger.warn
+        self._cache_info = ConfluenceCacheInfo(self)
         self._cached_footer_data = None
         self._cached_header_data = None
+        self._config_confluence_hash = None
         self._original_get_doctree = None
         self._verbose = self.app.verbosity
@@ -148,6 +155,14 @@ def init(self):
         self.config.sphinx_verbosity = self._verbose
         self.publisher.init(self.config, self.cloud)

+        # With the configuration finalized, generate a Confluence-specific
+        # configuration hash that is applicable to this run
+        self._config_confluence_hash = build_hash(config)
+        self.verbose('configuration hash ' + self._config_confluence_hash)
+
+        self._cache_info.load_cache()
+        self._cache_info.configure(self._config_confluence_hash)
+
         self.create_template_bridge()
         self.templates.init(self)
@@ -209,27 +224,11 @@ def get_outdated_docs(self):
         """
         Return an iterable of input files that are outdated.
         """
-        # This method is taken from TextBuilder.get_outdated_docs()
-        # with minor changes to support :confval:`rst_file_transform`.
+
         for docname in self.env.found_docs:
-            if docname not in self.env.all_docs:
+            if self._cache_info.is_outdated(docname):
                 yield docname
                 continue
-            sourcename = path.join(self.env.srcdir, docname +
-                self.file_suffix)
-            targetname = path.join(self.outdir, self.file_transform(docname))
-
-            try:
-                targetmtime = path.getmtime(targetname)
-            except Exception:
-                targetmtime = 0
-            try:
-                srcmtime = path.getmtime(sourcename)
-                if srcmtime > targetmtime:
-                    yield docname
-            except OSError:
-                # source doesn't exist anymore
-                pass

     def get_target_uri(self, docname, typ=None):
         return self.link_transform(docname)
@@ -483,6 +482,8 @@ def write_doc(self, docname, doctree):
         except OSError as err:
             self.warn(f'error writing file {outfilename}: {err}')

+        self._cache_info.track_page_hash(docname)
+
     def publish_doc(self, docname, output):
         conf = self.config
         title = self.state.title(docname)
@@ -519,6 +520,8 @@ def publish_doc(self, docname, output):
         uploaded_id = self.publisher.store_page(title, data, parent_id)
         self.state.register_upload_id(docname, uploaded_id)

+        self._cache_info.track_last_page_id(docname, uploaded_id)
+
         if self.config.root_doc == docname:
             self.root_doc_page_id = uploaded_id
@@ -751,8 +754,6 @@ def finish(self):
         # publish generated output (if desired)
         if self.publish:
-            self.legacy_assets = {}
-            self.legacy_pages = None
             self.parent_id = self.publisher.get_base_page_id()

             for docname in status_iterator(
@@ -802,9 +803,21 @@ def to_asset_name(asset):
                 except OSError as err:
                     self.warn(f'error reading asset {key}: {err}')

+            # if we have documents that were not changed (and therefore do
+            # not need to be republished), assume any cached publish page
+            # ids are still valid and remove them from the legacy pages list
+            other_docs = self.env.all_docs.keys() - set(self.publish_docnames)
+            for unchanged_doc in other_docs:
+                lpid = self._cache_info.last_page_id(unchanged_doc)
+                if lpid is not None and lpid in self.legacy_pages:
+                    self.legacy_pages.remove(lpid)
+
             self.publish_cleanup()
             self.publish_finalize()

+        # persist cache from this run
+        self._cache_info.save_cache()
+
     def cleanup(self):
         if self.publish:
             self.publisher.disconnect()

diff --git a/sphinxcontrib/confluencebuilder/config/env.py b/sphinxcontrib/confluencebuilder/config/env.py
index b7cc7b38..bd39ba7f 100644
--- a/sphinxcontrib/confluencebuilder/config/env.py
+++ b/sphinxcontrib/confluencebuilder/config/env.py
@@ -2,6 +2,7 @@
 # Copyright Sphinx Confluence Builder Contributors (AUTHORS)

 from sphinxcontrib.confluencebuilder.logger import ConfluenceLogger as logger
+from sphinxcontrib.confluencebuilder.util import ConfluenceUtil
 from sphinxcontrib.confluencebuilder.util import str2bool
 import os
@@ -38,3 +39,42 @@ def apply_env_overrides(builder):
                 conf[key] = int(env_val)
             else:
                 conf[key] = env_val
+
+
+def build_hash(config):
+    """
+    builds a confluence configuration hash
+
+    This call will build a hash based on Confluence-specific configuration
+    entries. This hash can later be used to determine whether or not
+    re-processing documents is needed based on certain configuration
+    values being changed.
+
+    Args:
+        config: the configuration
+    """
+
+    # extract confluence configuration options
+    entries = []
+    for c in sorted(config.filter(['confluence'])):
+        entries.append(c.name)
+        entries.append(c.value)
+
+    # compile a string to hash, sorting dictionary/list/etc. entries along
+    # the way
+    hash_data = []
+    while entries:
+        value = entries.pop(0)
+
+        if isinstance(value, dict):
+            sorted_value = dict(sorted(value.items()))
+            for k, v in sorted_value.items():
+                entries.append(k)
+                entries.append(v)
+        elif isinstance(value, (list, set, tuple)):
+            entries.extend(sorted(value))
+        else:
+            hash_data.append(str(value))
+
+    # generate a configuration hash
+    return ConfluenceUtil.hash(''.join(hash_data))

diff --git a/sphinxcontrib/confluencebuilder/env.py b/sphinxcontrib/confluencebuilder/env.py
new file mode 100644
index 00000000..23197de4
--- /dev/null
+++ b/sphinxcontrib/confluencebuilder/env.py
@@ -0,0 +1,231 @@
+# SPDX-License-Identifier: BSD-2-Clause
+# Copyright Sphinx Confluence Builder Contributors (AUTHORS)
+
+from sphinxcontrib.confluencebuilder.util import ConfluenceUtil
+import json
+import os
+
+
+# base filename for cache information
+ENV_CACHE_BASENAME = '.cache_confluence_'
+
+# filename for configuration hash
+ENV_CACHE_CONFIG = ENV_CACHE_BASENAME + 'config'
+
+# filename for documentation hashes
+ENV_CACHE_DOCHASH = ENV_CACHE_BASENAME + 'dochash'
+
+# filename for last publication identifiers
+ENV_CACHE_PUBLISH = ENV_CACHE_BASENAME + 'publish'
+
+
+class ConfluenceCacheInfo:
+    def __init__(self, builder):
+        outdir = builder.outdir
+
+        self.builder = builder
+        self.env = builder.env
+        self._active_dochash = {}
+        self._active_hash = None
+        self._active_pids = {}
+        self._cache_cfg_file = os.path.join(outdir, ENV_CACHE_CONFIG)
+        self._cache_hash_file = os.path.join(outdir, ENV_CACHE_DOCHASH)
+        self._cache_publish_file = os.path.join(outdir, ENV_CACHE_PUBLISH)
+        self._cached_dochash = {}
+        self._cached_hash = None
+        self._cached_pids = {}
+
+    def configure(self, hash_):
+        """
+        track the active configuration hash
+
+        This call is used to accept the known hash representing the active
+        configuration of a Confluence builder run. This hash can later be
+        used when checking for outdated documents, as well as saved on a
+        run to help track outdated documents in future runs (if any).
+
+        Args:
+            hash_: the configuration hash
+        """
+
+        self._active_hash = hash_
+
+    def is_outdated(self, docname):
+        """
+        check if a provided document is considered outdated
+
+        This call can return whether a provided document name is believed
+        to be outdated and requires a new build.
+
+        Args:
+            docname: the name of the document
+
+        Returns:
+            whether the page is outdated
+        """
+
+        # if the document was not already cached in Sphinx's environment,
+        # consider it outdated
+        if docname not in self.env.all_docs:
+            return True
+
+        # if there is no previous cached hash, all documents are considered
+        # outdated
+        if not self._cached_hash:
+            return True
+
+        # if there is no output file, consider the document outdated
+        dst_filename = self.builder.file_transform(docname)
+        dst_file = os.path.join(self.builder.outdir, dst_filename)
+        if not os.path.exists(dst_file):
+            return True
+
+        # if there is no source file (removed document), consider the
+        # document outdated
+        src_file = self.env.doc2path(docname)
+        if not os.path.exists(src_file):
+            return True
+
+        # if the hashes do not match, this document is outdated
+        doc_hash = self.track_page_hash(docname)
+        old_doc_hash = self._cached_dochash.get(docname)
+        return doc_hash != old_doc_hash
+
+    def last_page_id(self, docname):
+        """
+        return the last publish page identifier for a document (if any)
+
+        This call can return the last page identifier a specific document
+        was published to, if published at all. This is to help unflag
+        documents queued for removal (cleanup) when documents are skipped
+        because they are not outdated and are not processed for writing.
+
+        Args:
+            docname: the name of the document
+
+        Returns:
+            the page identifier or ``None``
+        """
+
+        pid = self._cached_pids.get(docname)
+
+        # if a "last page id" is checked for a document, this means a
+        # new or unchanged document is being processed -- if an
+        # unchanged document, use the cached page id and track it as an
+        # assumed active id
+        if pid is not None:
+            self.track_last_page_id(docname, pid)
+
+        return pid
+
+    def track_page_hash(self, docname):
+        """
+        track the last publish page identifier for a document
+
+        This call can be used to track last page identifier a specific
+        document was published to. This is to help on re-runs where a
+        run may wish to be aware of already published documents.
+
+        Args:
+            docname: the name of the document
+
+        Returns:
+            the document's hash
+        """
+
+        doc_hash = self._active_dochash.get(docname)
+        if doc_hash:
+            return doc_hash
+
+        # determine the hash of the document based on data + config-hash
+        src_file = self.env.doc2path(docname)
+        src_file_hash = ConfluenceUtil.hash_asset(src_file)
+        doc_hash_data = self._active_hash + src_file_hash
+        doc_hash = ConfluenceUtil.hash(doc_hash_data)
+
+        # remember this document hash for when we later save
+        self._active_dochash[docname] = doc_hash
+
+        return doc_hash
+
+    def track_last_page_id(self, docname, id):
+        """
+        track the last publish page identifier for a document
+
+        This call can be used to track the last page identifier a specific
+        document was published to. This is to help on re-runs where a
+        run may wish to be aware of already published documents.
+
+        Args:
+            docname: the name of the document
+            id: the page identifier
+        """
+
+        self._active_pids[docname] = id
+        self._cached_pids.pop(docname, None)
+
+    def load_cache(self):
+        """
+        load persisted cache information from a previous run (if any)
+
+        After a build run with Confluence, information about the build may
+        be cached to help track outdated documents. This call can reload
+        any cache information stored from a previous run.
+        """
+
+        try:
+            with open(self._cache_cfg_file, encoding='utf-8') as f:
+                self._cached_hash = json.load(f).get('hash')
+        except FileNotFoundError:
+            pass
+        except OSError as e:
+            self.builder.warn(f'failed to load cache (config): {e}')
+
+        try:
+            with open(self._cache_hash_file, encoding='utf-8') as f:
+                self._cached_dochash = json.load(f)
+        except FileNotFoundError:
+            pass
+        except OSError as e:
+            self.builder.warn(f'failed to load cache (hashes): {e}')
+
+        try:
+            with open(self._cache_publish_file, encoding='utf-8') as f:
+                self._cached_pids = json.load(f)
+        except FileNotFoundError:
+            pass
+        except OSError as e:
+            self.builder.warn(f'failed to load cache (pids): {e}')
+
+    def save_cache(self):
+        """
+        save persisted cache information from a run
+
+        Save the updated state of this build information instance into a
+        cache file stored in the project's output directory. This
+        information can later be used by re-runs to track outdated
+        documents.
+        """
+
+        new_cfg = {
+            'hash': self._active_hash,
+        }
+
+        new_dochashes = dict(self._cached_dochash)
+        new_dochashes.update(self._active_dochash)
+
+        try:
+            with open(self._cache_cfg_file, 'w', encoding='utf-8') as f:
+                json.dump(new_cfg, f)
+        except OSError as e:
+            self.builder.warn(f'failed to save cache (config): {e}')
+
+        try:
+            with open(self._cache_hash_file, 'w', encoding='utf-8') as f:
+                json.dump(new_dochashes, f)
+        except OSError as e:
+            self.builder.warn(f'failed to save cache (hashes): {e}')
+
+        try:
+            with open(self._cache_publish_file, 'w', encoding='utf-8') as f:
+                json.dump(self._active_pids, f)
+        except OSError as e:
+            self.builder.warn(f'failed to save cache (pids): {e}')

From 603771182a4823892dbc01fb2cdace71e187389a Mon Sep 17 00:00:00 2001
From: James Knight
Date: Sun, 3 Sep 2023 09:47:23 -0400
Subject: [PATCH 2/4] tests: adding tests to verify build cache and legacy
 pages

With the rework in managing the build cache and legacy pages, add a
series of unit tests to verify these capabilities.
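
As a rough sketch of the pattern these tests follow (using the test
library's `build_sphinx` helper and its new `force` option introduced
below; the directory variables here are illustrative):

    # first pass -- a forced build processes every document
    build_sphinx(src_dir, config=config, out_dir=out_dir)

    # second pass -- a non-forced rebuild relies on the persisted build
    # cache, so unchanged documents are not flagged as outdated
    build_sphinx(src_dir, config=config, out_dir=out_dir, force=False)
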
Signed-off-by: James Knight
---
 tests/lib/__init__.py                 |   5 +-
 tests/unit-tests/test_cache.py        | 165 ++++++++++++++++++++++++
 tests/unit-tests/test_legacy_pages.py | 174 ++++++++++++++++++++++++++
 3 files changed, 342 insertions(+), 2 deletions(-)
 create mode 100644 tests/unit-tests/test_cache.py
 create mode 100644 tests/unit-tests/test_legacy_pages.py

diff --git a/tests/lib/__init__.py b/tests/lib/__init__.py
index 6c622038..3337a5d0 100644
--- a/tests/lib/__init__.py
+++ b/tests/lib/__init__.py
@@ -616,7 +616,7 @@ def prepare_sphinx_filenames(src_dir, filenames, configs=None):

 def build_sphinx(src_dir, config=None, out_dir=None, extra_config=None,
-                 builder=None, relax=False, filenames=None):
+                 builder=None, relax=False, filenames=None, force=True):
     """
     prepare a sphinx application instance
@@ -632,6 +632,7 @@ def build_sphinx(src_dir, config=None, out_dir=None, extra_config=None,
         builder (optional): the builder to use
         relax (optional): do not generate warnings as errors
         filenames (optional): specific documents to process
+        force (optional): whether to force processing of each document

     Returns:
         the output directory
@@ -641,7 +642,7 @@ def build_sphinx(src_dir, config=None, out_dir=None, extra_config=None,
     out_dir = prepare_dirs()

     files = []
-    force_all = True
+    force_all = force
     if filenames:
         # force-all not supported when using explicit filenames

diff --git a/tests/unit-tests/test_cache.py b/tests/unit-tests/test_cache.py
new file mode 100644
index 00000000..a1b91a01
--- /dev/null
+++ b/tests/unit-tests/test_cache.py
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: BSD-2-Clause
+# Copyright Sphinx Confluence Builder Contributors (AUTHORS)
+
+from sphinxcontrib.confluencebuilder.util import temp_dir
+from tests.lib import prepare_dirs
+from tests.lib.testcase import ConfluenceTestCase
+import os
+
+
+class TestCache(ConfluenceTestCase):
+    def test_cache_outdated_config(self):
+        """validate triggering outdated content with config change"""
+        #
+        # Ensures documents will be flagged as outdated if a
+        # Confluence-specific configuration change requires a rebuild.
+
+        config = dict(self.config)
+        dataset = os.path.join(self.datasets, 'minimal')
+        out_dir = prepare_dirs()
+        src_docs = []
+
+        def doctree_resolved_handler(app, doctree, docname):
+            src_docs.append(docname)
+
+        with self.prepare(dataset, config=config, out_dir=out_dir) as app:
+            app.connect('doctree-resolved', doctree_resolved_handler)
+            app.build()
+
+        self.assertListEqual(src_docs, [
+            'index',
+        ])
+
+        # re-run with no changes -- no document will be outdated and
+        # no source documents will be read
+        src_docs.clear()
+
+        with self.prepare(dataset, config=config, out_dir=out_dir) as app:
+            app.connect('doctree-resolved', doctree_resolved_handler)
+            app.build()
+
+        self.assertListEqual(src_docs, [
+        ])
+
+        # re-run with a configuration change that won't trigger a rebuild
+        src_docs.clear()
+        config['confluence_watch'] = True
+
+        with self.prepare(dataset, config=config, out_dir=out_dir) as app:
+            app.connect('doctree-resolved', doctree_resolved_handler)
+            app.build()
+
+        self.assertListEqual(src_docs, [
+        ])
+
+        # re-run with a configuration change to trigger an outdated doc
+        src_docs.clear()
+        config['confluence_global_labels'] = [
+            'new-label',
+        ]
+
+        with self.prepare(dataset, config=config, out_dir=out_dir) as app:
+            app.connect('doctree-resolved', doctree_resolved_handler)
+            app.build()
+
+        self.assertListEqual(src_docs, [
+            'index',
+        ])
+
+    def test_cache_outdated_content(self):
+        """validate handling outdated content"""
+        #
+        # Ensures the Sphinx engine will trigger/process an outdated
+        # document. While the results should be expected (since we are
+        # basically re-validating a Sphinx capability), the purpose of
+        # this test is to ensure there are no oddities when handling an
+        # environment-flagged outdated document while this extension is
+        # loaded.
+
+        added_docs = []
+        changed_docs = []
+        out_dir = prepare_dirs()
+
+        def env_get_outdated(app, env, added, changed, removed):
+            added_docs.clear()
+            added_docs.extend(added)
+            added_docs.sort()
+
+            changed_docs.clear()
+            changed_docs.extend(changed)
+            changed_docs.sort()
+
+            return []
+
+        def write_doc(fname, data):
+            try:
+                with open(fname, 'w') as f:
+                    f.write(data)
+            except OSError:
+                pass
+
+        with temp_dir() as src_dir:
+            index_file = os.path.join(src_dir, 'index.rst')
+            write_doc(index_file, '''\
+index
+=====
+
+content
+''')
+
+            second_file = os.path.join(src_dir, 'second.rst')
+            write_doc(second_file, '''\
+:orphan:
+
+second
+======
+
+more content
+''')
+
+            with self.prepare(src_dir, out_dir=out_dir) as app:
+                app.connect('env-get-outdated', env_get_outdated)
+                app.build()
+
+            self.assertListEqual(added_docs, [
+                'index',
+                'second',
+            ])
+
+            self.assertListEqual(changed_docs, [
+            ])
+
+            # re-run with no changes -- no document will be outdated and
+            # no source documents will be read
+
+            with self.prepare(src_dir, out_dir=out_dir) as app:
+                app.connect('env-get-outdated', env_get_outdated)
+                app.build()
+
+            self.assertListEqual(added_docs, [
+            ])
+
+            self.assertListEqual(changed_docs, [
+            ])
+
+            # re-run with a change to a single document, and this modified
+            # file should be listed as outdated
+            changed_docs.clear()
+            write_doc(second_file, '''\
+:orphan:
+
+second
+======
+
+changed content
+''')
+
+            with self.prepare(src_dir, out_dir=out_dir) as app:
+                app.connect('env-get-outdated', env_get_outdated)
+                app.build()
+
+            self.assertListEqual(added_docs, [
+            ])
+
+            self.assertListEqual(changed_docs, [
+                'second',
+            ])

diff --git a/tests/unit-tests/test_legacy_pages.py b/tests/unit-tests/test_legacy_pages.py
new file mode 100644
index 00000000..2aa44ebe
--- /dev/null
+++ b/tests/unit-tests/test_legacy_pages.py
@@ -0,0 +1,174 @@
+# SPDX-License-Identifier: BSD-2-Clause
+# Copyright Sphinx Confluence Builder Contributors (AUTHORS)
+
+from sphinxcontrib.confluencebuilder.builder import ConfluenceBuilder
+from sphinxcontrib.confluencebuilder.util import temp_dir
+from tests.lib import prepare_dirs
+from tests.lib.testcase import ConfluenceTestCase
+from unittest.mock import patch
+import os
+
+
+class TestConfluenceLegacyPages(ConfluenceTestCase):
+    def test_legacy_pages(self):
+        """validate legacy pages are managed across rebuilds"""
+        #
+        # Verify that pages which are still valid, but are not republished
+        # on a rebuild, are not treated as legacy pages to be purged, while
+        # pages for removed documents are detected and cleaned up.
+
+        config = dict(self.config)
+        config['confluence_publish'] = True
+        config['confluence_server_url'] = 'https://example.com/'
+        config['confluence_space_key'] = 'TEST'
+        config['confluence_cleanup_purge'] = True
+
+        # prepare a mocked publisher that we can use to emulate publishing
+        # events and check whether legacy pages properly remain or are
+        # purged
+        old_init = getattr(ConfluenceBuilder, 'init')
+        publisher = MockedPublisher()
+
+        def wrapped_init(builder):
+            builder.publisher = publisher
+            return old_init(builder)
+
+        # prepare a fixed source and working directory, since we will be
+        # performing rebuilds and want to ensure the extension handles
+        # rebuilds appropriately (e.g. not purging pages that have not
+        # been updated but are still valid)
+        out_dir = prepare_dirs()
+
+        with temp_dir() as src_dir:
+            conf_file = os.path.join(src_dir, 'conf.py')
+            write_doc(conf_file, '')
+
+            index_file = os.path.join(src_dir, 'index.rst')
+            write_doc(index_file, '''\
+index
+=====
+
+.. toctree::
+
+    second
+    third
+''')
+
+            second_file = os.path.join(src_dir, 'second.rst')
+            write_doc(second_file, '''\
+second
+======
+
+content
+''')
+
+            third_file = os.path.join(src_dir, 'third.rst')
+            write_doc(third_file, '''\
+third
+=====
+
+content
+''')
+
+            # first pass build
+            with patch.object(ConfluenceBuilder, 'init', wrapped_init):
+                self.build(src_dir, config=config, out_dir=out_dir)
+
+            # all three pages should be "published"
+            self.assertEqual(len(publisher.published), 3)
+
+            # rebuild documentation; no pages should be removed even if
+            # no pages have been republished (since they are not outdated)
+            with patch.object(ConfluenceBuilder, 'init', wrapped_init):
+                self.build(src_dir, config=config, out_dir=out_dir,
+                    force=False)
+
+            # no pages "published"; no pages removed
+            self.assertEqual(len(publisher.published), 0)
+            self.assertEqual(len(publisher.removed), 0)
+
+            # remove the second file; update the index to drop the entry
+            os.remove(second_file)
+
+            write_doc(index_file, '''\
+index
+=====
+
+.. toctree::
+
+    third
+''')
+
+            # rebuild documentation; this should trigger an update of the
+            # index page and detect that the second page is now legacy
+            with patch.object(ConfluenceBuilder, 'init', wrapped_init):
+                self.build(src_dir, config=config, out_dir=out_dir,
+                    force=False)
+
+            self.assertEqual(len(publisher.published), 1)
+            self.assertListEqual(publisher.published, [
+                'index',
+            ])
+
+            self.assertEqual(len(publisher.removed), 1)
+            self.assertListEqual(publisher.removed, [
+                'second',
+            ])
+
+
+class MockedPublisher:
+    base_page_idx = 2
+    page2id = {}
+    id2page = {}
+
+    def init(self, config, cloud=None):
+        self.published = []
+        self.removed = []
+
+    def get_base_page_id(self):
+        return 1
+
+    def get_descendants(self, page_id, mode):
+        return set(self.id2page.keys())
+
+    def remove_page(self, page_id):
+        page_name = self.id2page.get(page_id)
+        self.removed.append(page_name)
+
+    def store_page(self, page_name, data, parent_id=None):
+        id = self.page2id.get(page_name)
+        if not id:
+            id = self.base_page_idx
+            self.base_page_idx += 1
+
+            self.page2id[page_name] = id
+            self.id2page[id] = page_name
+
+        self.published.append(page_name)
+
+        return id
+
+    # other unused methods
+
+    def connect(self):
+        pass
+
+    def disconnect(self):
+        pass
+
+    def get_ancestors(self, page_id):
+        return set()
+
+    def get_attachments(self, page_id):
+        return {}
+
+    def restrict_ancestors(self, ancestors):
+        pass
+
+    def store_attachment(self, page_id, name, data, mimetype, hash_, force=False):
+        return 0
+
+
+def write_doc(fname, data):
+    try:
+        with open(fname, 'w') as f:
+            f.write(data)
+    except OSError:
+        pass

From 2ff3974cf1eca3a8d81d86b61f5a69ce39e8ec48 Mon Sep 17 00:00:00 2001
From: James Knight
Date: Sun, 3 Sep 2023 09:47:35 -0400
Subject: [PATCH 3/4] config: adding more extension options to flag outdated
 documents

Adding a series of configuration entries that flag a documentation set
as outdated if these options are changed. These options were not flagged
before; however, navigation changes can require a full republish,
especially if other navigation options (e.g. next-prev pages) are
enabled.

Signed-off-by: James Knight
---
 sphinxcontrib/confluencebuilder/__init__.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/sphinxcontrib/confluencebuilder/__init__.py b/sphinxcontrib/confluencebuilder/__init__.py
index 30468434..8edd9a75 100644
--- a/sphinxcontrib/confluencebuilder/__init__.py
+++ b/sphinxcontrib/confluencebuilder/__init__.py
@@ -86,7 +86,7 @@ def setup(app):
     # Default alignment for tables, figures, etc.
     cm.add_conf('confluence_default_alignment', 'confluence')
     # Enablement of a generated domain index documents
-    cm.add_conf('confluence_domain_indices')
+    cm.add_conf('confluence_domain_indices', 'confluence')
     # Confluence editor to target for publication.
     cm.add_conf('confluence_editor', 'confluence')
     # File to get page header information from.
@@ -98,11 +98,11 @@ def setup(app):
     # Dictionary to pass to footer when rendering template.
     cm.add_conf('confluence_footer_data', 'confluence')
     # Enablement of a generated search documents
-    cm.add_conf_bool('confluence_include_search')
+    cm.add_conf_bool('confluence_include_search', 'confluence')
     # Enablement of a "page generated" notice.
     cm.add_conf_bool('confluence_page_generation_notice', 'confluence')
     # Enablement of publishing pages into a hierarchy from a root toctree.
-    cm.add_conf_bool('confluence_page_hierarchy')
+    cm.add_conf_bool('confluence_page_hierarchy', 'confluence')
     # Show previous/next buttons (bottom, top, both, None).
     cm.add_conf('confluence_prev_next_buttons_location', 'confluence')
     # Suffix to put after section numbers, before section name
@@ -110,7 +110,7 @@ def setup(app):
     # Enablement of a "Edit/Show Source" reference on each document
     cm.add_conf('confluence_sourcelink', 'confluence')
     # Enablement of a generated index document
-    cm.add_conf_bool('confluence_use_index')
+    cm.add_conf_bool('confluence_use_index', 'confluence')
     # Enablement for toctrees for singleconfluence documents.
     cm.add_conf_bool('singleconfluence_toctree', 'singleconfluence')
@@ -132,7 +132,7 @@ def setup(app):
     # Explicitly prevent page notifications on update.
     cm.add_conf_bool('confluence_disable_notifications')
     # Define a series of labels to apply to all published pages.
-    cm.add_conf('confluence_global_labels')
+    cm.add_conf('confluence_global_labels', 'confluence')
     # Enablement of configuring root as space's homepage.
     cm.add_conf_bool('confluence_root_homepage')
     # Translation to override parent page identifier to publish to.

From df1d6293c05862505678ebfdb008316a92743937 Mon Sep 17 00:00:00 2001
From: James Knight
Date: Sun, 3 Sep 2023 10:37:20 -0400
Subject: [PATCH 4/4] cleanup comment

Signed-off-by: James Knight
---
 sphinxcontrib/confluencebuilder/env.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sphinxcontrib/confluencebuilder/env.py b/sphinxcontrib/confluencebuilder/env.py
index 23197de4..0c27797a 100644
--- a/sphinxcontrib/confluencebuilder/env.py
+++ b/sphinxcontrib/confluencebuilder/env.py
@@ -119,11 +119,11 @@ def last_page_id(self, docname):

     def track_page_hash(self, docname):
         """
-        track the last publish page identifier for a document
+        track the last publish page hash for a document

-        This call can be used to track last page identifier a specific
-        document was published to. This is to help on re-runs where a
-        run may wish to be aware of already published documents.
+        This call can be used to track the last page hash for a specific
+        document. This is to help on re-runs when checking to see if a
+        given page is outdated because its hash changed.

         Args:
             docname: the name of the document