This is page 19 of 35. Use http://codebase.md/dicklesworthstone/llm_gateway_mcp_server?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .cursorignore
├── .env.example
├── .envrc
├── .gitignore
├── additional_features.md
├── check_api_keys.py
├── completion_support.py
├── comprehensive_test.py
├── docker-compose.yml
├── Dockerfile
├── empirically_measured_model_speeds.json
├── error_handling.py
├── example_structured_tool.py
├── examples
│   ├── __init__.py
│   ├── advanced_agent_flows_using_unified_memory_system_demo.py
│   ├── advanced_extraction_demo.py
│   ├── advanced_unified_memory_system_demo.py
│   ├── advanced_vector_search_demo.py
│   ├── analytics_reporting_demo.py
│   ├── audio_transcription_demo.py
│   ├── basic_completion_demo.py
│   ├── cache_demo.py
│   ├── claude_integration_demo.py
│   ├── compare_synthesize_demo.py
│   ├── cost_optimization.py
│   ├── data
│   │   ├── sample_event.txt
│   │   ├── Steve_Jobs_Introducing_The_iPhone_compressed.md
│   │   └── Steve_Jobs_Introducing_The_iPhone_compressed.mp3
│   ├── docstring_refiner_demo.py
│   ├── document_conversion_and_processing_demo.py
│   ├── entity_relation_graph_demo.py
│   ├── filesystem_operations_demo.py
│   ├── grok_integration_demo.py
│   ├── local_text_tools_demo.py
│   ├── marqo_fused_search_demo.py
│   ├── measure_model_speeds.py
│   ├── meta_api_demo.py
│   ├── multi_provider_demo.py
│   ├── ollama_integration_demo.py
│   ├── prompt_templates_demo.py
│   ├── python_sandbox_demo.py
│   ├── rag_example.py
│   ├── research_workflow_demo.py
│   ├── sample
│   │   ├── article.txt
│   │   ├── backprop_paper.pdf
│   │   ├── buffett.pdf
│   │   ├── contract_link.txt
│   │   ├── legal_contract.txt
│   │   ├── medical_case.txt
│   │   ├── northwind.db
│   │   ├── research_paper.txt
│   │   ├── sample_data.json
│   │   └── text_classification_samples
│   │       ├── email_classification.txt
│   │       ├── news_samples.txt
│   │       ├── product_reviews.txt
│   │       └── support_tickets.txt
│   ├── sample_docs
│   │   └── downloaded
│   │       └── attention_is_all_you_need.pdf
│   ├── sentiment_analysis_demo.py
│   ├── simple_completion_demo.py
│   ├── single_shot_synthesis_demo.py
│   ├── smart_browser_demo.py
│   ├── sql_database_demo.py
│   ├── sse_client_demo.py
│   ├── test_code_extraction.py
│   ├── test_content_detection.py
│   ├── test_ollama.py
│   ├── text_classification_demo.py
│   ├── text_redline_demo.py
│   ├── tool_composition_examples.py
│   ├── tournament_code_demo.py
│   ├── tournament_text_demo.py
│   ├── unified_memory_system_demo.py
│   ├── vector_search_demo.py
│   ├── web_automation_instruction_packs.py
│   └── workflow_delegation_demo.py
├── LICENSE
├── list_models.py
├── marqo_index_config.json.example
├── mcp_protocol_schema_2025-03-25_version.json
├── mcp_python_lib_docs.md
├── mcp_tool_context_estimator.py
├── model_preferences.py
├── pyproject.toml
├── quick_test.py
├── README.md
├── resource_annotations.py
├── run_all_demo_scripts_and_check_for_errors.py
├── storage
│   └── smart_browser_internal
│       ├── locator_cache.db
│       ├── readability.js
│       └── storage_state.enc
├── test_client.py
├── test_connection.py
├── TEST_README.md
├── test_sse_client.py
├── test_stdio_client.py
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── integration
│   │   ├── __init__.py
│   │   └── test_server.py
│   ├── manual
│   │   ├── test_extraction_advanced.py
│   │   └── test_extraction.py
│   └── unit
│       ├── __init__.py
│       ├── test_cache.py
│       ├── test_providers.py
│       └── test_tools.py
├── TODO.md
├── tool_annotations.py
├── tools_list.json
├── ultimate_mcp_banner.webp
├── ultimate_mcp_logo.webp
├── ultimate_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── cli
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── commands.py
│   │   ├── helpers.py
│   │   └── typer_cli.py
│   ├── clients
│   │   ├── __init__.py
│   │   ├── completion_client.py
│   │   └── rag_client.py
│   ├── config
│   │   └── examples
│   │       └── filesystem_config.yaml
│   ├── config.py
│   ├── constants.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── evaluation
│   │   │   ├── base.py
│   │   │   └── evaluators.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── anthropic.py
│   │   │   ├── base.py
│   │   │   ├── deepseek.py
│   │   │   ├── gemini.py
│   │   │   ├── grok.py
│   │   │   ├── ollama.py
│   │   │   ├── openai.py
│   │   │   └── openrouter.py
│   │   ├── server.py
│   │   ├── state_store.py
│   │   ├── tournaments
│   │   │   ├── manager.py
│   │   │   ├── tasks.py
│   │   │   └── utils.py
│   │   └── ums_api
│   │       ├── __init__.py
│   │       ├── ums_database.py
│   │       ├── ums_endpoints.py
│   │       ├── ums_models.py
│   │       └── ums_services.py
│   ├── exceptions.py
│   ├── graceful_shutdown.py
│   ├── services
│   │   ├── __init__.py
│   │   ├── analytics
│   │   │   ├── __init__.py
│   │   │   ├── metrics.py
│   │   │   └── reporting.py
│   │   ├── cache
│   │   │   ├── __init__.py
│   │   │   ├── cache_service.py
│   │   │   ├── persistence.py
│   │   │   ├── strategies.py
│   │   │   └── utils.py
│   │   ├── cache.py
│   │   ├── document.py
│   │   ├── knowledge_base
│   │   │   ├── __init__.py
│   │   │   ├── feedback.py
│   │   │   ├── manager.py
│   │   │   ├── rag_engine.py
│   │   │   ├── retriever.py
│   │   │   └── utils.py
│   │   ├── prompts
│   │   │   ├── __init__.py
│   │   │   ├── repository.py
│   │   │   └── templates.py
│   │   ├── prompts.py
│   │   └── vector
│   │       ├── __init__.py
│   │       ├── embeddings.py
│   │       └── vector_service.py
│   ├── tool_token_counter.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── audio_transcription.py
│   │   ├── base.py
│   │   ├── completion.py
│   │   ├── docstring_refiner.py
│   │   ├── document_conversion_and_processing.py
│   │   ├── enhanced-ums-lookbook.html
│   │   ├── entity_relation_graph.py
│   │   ├── excel_spreadsheet_automation.py
│   │   ├── extraction.py
│   │   ├── filesystem.py
│   │   ├── html_to_markdown.py
│   │   ├── local_text_tools.py
│   │   ├── marqo_fused_search.py
│   │   ├── meta_api_tool.py
│   │   ├── ocr_tools.py
│   │   ├── optimization.py
│   │   ├── provider.py
│   │   ├── pyodide_boot_template.html
│   │   ├── python_sandbox.py
│   │   ├── rag.py
│   │   ├── redline-compiled.css
│   │   ├── sentiment_analysis.py
│   │   ├── single_shot_synthesis.py
│   │   ├── smart_browser.py
│   │   ├── sql_databases.py
│   │   ├── text_classification.py
│   │   ├── text_redline_tools.py
│   │   ├── tournament.py
│   │   ├── ums_explorer.html
│   │   └── unified_memory_system.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── async_utils.py
│   │   ├── display.py
│   │   ├── logging
│   │   │   ├── __init__.py
│   │   │   ├── console.py
│   │   │   ├── emojis.py
│   │   │   ├── formatter.py
│   │   │   ├── logger.py
│   │   │   ├── panels.py
│   │   │   ├── progress.py
│   │   │   └── themes.py
│   │   ├── parse_yaml.py
│   │   ├── parsing.py
│   │   ├── security.py
│   │   └── text.py
│   └── working_memory_api.py
├── unified_memory_system_technical_analysis.md
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/storage/smart_browser_internal/readability.js:
--------------------------------------------------------------------------------
```javascript
/*
* Copyright (c) 2010 Arc90 Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This code is heavily based on Arc90's readability.js (1.7.1) script
* available at: http://code.google.com/p/arc90labs-readability
*/
/**
* Public constructor.
* @param {HTMLDocument} doc The document to parse.
* @param {Object} options The options object.
*/
function Readability(doc, options) {
// In some older versions, people passed a URI as the first argument. Cope:
if (options && options.documentElement) {
doc = options;
options = arguments[2];
} else if (!doc || !doc.documentElement) {
throw new Error("First argument to Readability constructor should be a document object.");
}
options = options || {};
this._doc = doc;
this._docJSDOMParser = this._doc.firstChild.__JSDOMParser__;
this._articleTitle = null;
this._articleByline = null;
this._articleDir = null;
this._articleSiteName = null;
this._attempts = [];
// Configurable options
this._debug = !!options.debug;
this._maxElemsToParse = options.maxElemsToParse || this.DEFAULT_MAX_ELEMS_TO_PARSE;
this._nbTopCandidates = options.nbTopCandidates || this.DEFAULT_N_TOP_CANDIDATES;
this._charThreshold = options.charThreshold || this.DEFAULT_CHAR_THRESHOLD;
this._classesToPreserve = this.CLASSES_TO_PRESERVE.concat(options.classesToPreserve || []);
this._keepClasses = !!options.keepClasses;
this._serializer = options.serializer || function(el) {
return el.innerHTML;
};
this._disableJSONLD = !!options.disableJSONLD;
this._allowedVideoRegex = options.allowedVideoRegex || this.REGEXPS.videos;
// Start with all flags set
this._flags = this.FLAG_STRIP_UNLIKELYS |
this.FLAG_WEIGHT_CLASSES |
this.FLAG_CLEAN_CONDITIONALLY;
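// Illustrative note (not in the upstream source): with all three flags set,
// this._flags === 0x7 (0x1 | 0x2 | 0x4). The retry loop in _grabArticle later
// clears one bit at a time via _removeFlag() when a parse attempt yields too
// little text, so each pass gets progressively less aggressive.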
// Control whether log messages are sent to the console
if (this._debug) {
let logNode = function(node) {
if (node.nodeType == node.TEXT_NODE) {
return `${node.nodeName} ("${node.textContent}")`;
}
let attrPairs = Array.from(node.attributes || [], function(attr) {
return `${attr.name}="${attr.value}"`;
}).join(" ");
return `<${node.localName} ${attrPairs}>`;
};
this.log = function () {
if (typeof console !== "undefined") {
let args = Array.from(arguments, arg => {
if (arg && arg.nodeType == this.ELEMENT_NODE) {
return logNode(arg);
}
return arg;
});
args.unshift("Reader: (Readability)");
console.log.apply(console, args);
} else if (typeof dump !== "undefined") {
/* global dump */
var msg = Array.prototype.map.call(arguments, function(x) {
return (x && x.nodeName) ? logNode(x) : x;
}).join(" ");
dump("Reader: (Readability) " + msg + "\n");
}
};
} else {
this.log = function () {};
}
}
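// Illustrative usage sketch, not part of the library itself. It assumes the
// standard parse() entry point, which is defined later in this file (beyond
// this excerpt):
//
//   var article = new Readability(document.cloneNode(true), {
//     debug: false,        // set true to route log() output to the console
//     charThreshold: 500,  // matches DEFAULT_CHAR_THRESHOLD below
//   }).parse();
//   // article -> { title, byline, dir, content, textContent, ... }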
Readability.prototype = {
FLAG_STRIP_UNLIKELYS: 0x1,
FLAG_WEIGHT_CLASSES: 0x2,
FLAG_CLEAN_CONDITIONALLY: 0x4,
// https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType
ELEMENT_NODE: 1,
TEXT_NODE: 3,
// Max number of nodes supported by this parser. Default: 0 (no limit)
DEFAULT_MAX_ELEMS_TO_PARSE: 0,
// The number of top candidates to consider when analysing how
// tight the competition is among candidates.
DEFAULT_N_TOP_CANDIDATES: 5,
// Element tags to score by default.
DEFAULT_TAGS_TO_SCORE: "section,h2,h3,h4,h5,h6,p,td,pre".toUpperCase().split(","),
// The default number of chars an article must have in order to return a result
DEFAULT_CHAR_THRESHOLD: 500,
// All of the regular expressions in use within readability.
// Defined up here so we don't instantiate them repeatedly in loops.
REGEXPS: {
// NOTE: These two regular expressions are duplicated in
// Readability-readerable.js. Please keep both copies in sync.
unlikelyCandidates: /-ad-|ai2html|banner|breadcrumbs|combx|comment|community|cover-wrap|disqus|extra|footer|gdpr|header|legends|menu|related|remark|replies|rss|shoutbox|sidebar|skyscraper|social|sponsor|supplemental|ad-break|agegate|pagination|pager|popup|yom-remote/i,
okMaybeItsACandidate: /and|article|body|column|content|main|shadow/i,
positive: /article|body|content|entry|hentry|h-entry|main|page|pagination|post|text|blog|story/i,
negative: /-ad-|hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|foot|footer|footnote|gdpr|masthead|media|meta|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|tool|widget/i,
extraneous: /print|archive|comment|discuss|e[\-]?mail|share|reply|all|login|sign|single|utility/i,
byline: /byline|author|dateline|writtenby|p-author/i,
replaceFonts: /<(\/?)font[^>]*>/gi,
normalize: /\s{2,}/g,
videos: /\/\/(www\.)?((dailymotion|youtube|youtube-nocookie|player\.vimeo|v\.qq)\.com|(archive|upload\.wikimedia)\.org|player\.twitch\.tv)/i,
shareElements: /(\b|_)(share|sharedaddy)(\b|_)/i,
nextLink: /(next|weiter|continue|>([^\|]|$)|»([^\|]|$))/i,
prevLink: /(prev|earl|old|new|<|«)/i,
tokenize: /\W+/g,
whitespace: /^\s*$/,
hasContent: /\S$/,
hashUrl: /^#.+/,
srcsetUrl: /(\S+)(\s+[\d.]+[xw])?(\s*(?:,|$))/g,
b64DataUrl: /^data:\s*([^\s;,]+)\s*;\s*base64\s*,/i,
// Commas as used in Latin, Sindhi, Chinese and various other scripts.
// see: https://en.wikipedia.org/wiki/Comma#Comma_variants
commas: /\u002C|\u060C|\uFE50|\uFE10|\uFE11|\u2E41|\u2E34|\u2E32|\uFF0C/g,
// See: https://schema.org/Article
jsonLdArticleTypes: /^Article|AdvertiserContentArticle|NewsArticle|AnalysisNewsArticle|AskPublicNewsArticle|BackgroundNewsArticle|OpinionNewsArticle|ReportageNewsArticle|ReviewNewsArticle|Report|SatiricalArticle|ScholarlyArticle|MedicalScholarlyArticle|SocialMediaPosting|BlogPosting|LiveBlogPosting|DiscussionForumPosting|TechArticle|APIReference$/
},
UNLIKELY_ROLES: [ "menu", "menubar", "complementary", "navigation", "alert", "alertdialog", "dialog" ],
DIV_TO_P_ELEMS: new Set([ "BLOCKQUOTE", "DL", "DIV", "IMG", "OL", "P", "PRE", "TABLE", "UL" ]),
ALTER_TO_DIV_EXCEPTIONS: ["DIV", "ARTICLE", "SECTION", "P"],
PRESENTATIONAL_ATTRIBUTES: [ "align", "background", "bgcolor", "border", "cellpadding", "cellspacing", "frame", "hspace", "rules", "style", "valign", "vspace" ],
DEPRECATED_SIZE_ATTRIBUTE_ELEMS: [ "TABLE", "TH", "TD", "HR", "PRE" ],
// The commented out elements qualify as phrasing content but tend to be
// removed by readability when put into paragraphs, so we ignore them here.
PHRASING_ELEMS: [
// "CANVAS", "IFRAME", "SVG", "VIDEO",
"ABBR", "AUDIO", "B", "BDO", "BR", "BUTTON", "CITE", "CODE", "DATA",
"DATALIST", "DFN", "EM", "EMBED", "I", "IMG", "INPUT", "KBD", "LABEL",
"MARK", "MATH", "METER", "NOSCRIPT", "OBJECT", "OUTPUT", "PROGRESS", "Q",
"RUBY", "SAMP", "SCRIPT", "SELECT", "SMALL", "SPAN", "STRONG", "SUB",
"SUP", "TEXTAREA", "TIME", "VAR", "WBR"
],
// These are the classes that readability sets itself.
CLASSES_TO_PRESERVE: [ "page" ],
// These are the list of HTML entities that need to be escaped.
HTML_ESCAPE_MAP: {
"lt": "<",
"gt": ">",
"amp": "&",
"quot": '"',
"apos": "'",
},
/**
* Run any post-process modifications to article content as necessary.
*
* @param Element
* @return void
**/
_postProcessContent: function(articleContent) {
// Readability cannot open relative uris so we convert them to absolute uris.
this._fixRelativeUris(articleContent);
this._simplifyNestedElements(articleContent);
if (!this._keepClasses) {
// Remove classes.
this._cleanClasses(articleContent);
}
},
/**
* Iterates over a NodeList, calls `filterFn` for each node, and removes the
* node if the function returned `true`.
*
* If no function is passed, removes all the nodes in the node list.
*
* @param NodeList nodeList The nodes to operate on
* @param Function filterFn the function to use as a filter
* @return void
*/
_removeNodes: function(nodeList, filterFn) {
// Avoid ever operating on live node lists.
if (this._docJSDOMParser && nodeList._isLiveNodeList) {
throw new Error("Do not pass live node lists to _removeNodes");
}
for (var i = nodeList.length - 1; i >= 0; i--) {
var node = nodeList[i];
var parentNode = node.parentNode;
if (parentNode) {
if (!filterFn || filterFn.call(this, node, i, nodeList)) {
parentNode.removeChild(node);
}
}
}
},
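// Note (editorial): iterating backwards means a removal never shifts the
// index of a node we have not visited yet, so every entry is examined
// exactly once.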
/**
* Iterates over a NodeList, and calls _setNodeTag for each node.
*
* @param NodeList nodeList The nodes to operate on
* @param String newTagName the new tag name to use
* @return void
*/
_replaceNodeTags: function(nodeList, newTagName) {
// Avoid ever operating on live node lists.
if (this._docJSDOMParser && nodeList._isLiveNodeList) {
throw new Error("Do not pass live node lists to _replaceNodeTags");
}
for (const node of nodeList) {
this._setNodeTag(node, newTagName);
}
},
/**
* Iterate over a NodeList, which doesn't natively fully implement the Array
* interface.
*
* For convenience, the current object context is applied to the provided
* iterate function.
*
* @param NodeList nodeList The NodeList.
* @param Function fn The iterate function.
* @return void
*/
_forEachNode: function(nodeList, fn) {
Array.prototype.forEach.call(nodeList, fn, this);
},
/**
* Iterate over a NodeList, and return the first node that passes
* the supplied test function
*
* For convenience, the current object context is applied to the provided
* test function.
*
* @param NodeList nodeList The NodeList.
* @param Function fn The test function.
* @return Node|undefined the first node that passes the test, if any
*/
_findNode: function(nodeList, fn) {
return Array.prototype.find.call(nodeList, fn, this);
},
/**
* Iterate over a NodeList, return true if any of the provided iterate
* function calls returns true, false otherwise.
*
* For convenience, the current object context is applied to the
* provided iterate function.
*
* @param NodeList nodeList The NodeList.
* @param Function fn The iterate function.
* @return Boolean
*/
_someNode: function(nodeList, fn) {
return Array.prototype.some.call(nodeList, fn, this);
},
/**
* Iterate over a NodeList, return true if all of the provided iterate
* function calls return true, false otherwise.
*
* For convenience, the current object context is applied to the
* provided iterate function.
*
* @param NodeList nodeList The NodeList.
* @param Function fn The iterate function.
* @return Boolean
*/
_everyNode: function(nodeList, fn) {
return Array.prototype.every.call(nodeList, fn, this);
},
/**
* Concat all nodelists passed as arguments.
*
* @param ...NodeList the node lists to concatenate
* @return Array
*/
_concatNodeLists: function() {
var slice = Array.prototype.slice;
var args = slice.call(arguments);
var nodeLists = args.map(function(list) {
return slice.call(list);
});
return Array.prototype.concat.apply([], nodeLists);
},
_getAllNodesWithTag: function(node, tagNames) {
if (node.querySelectorAll) {
return node.querySelectorAll(tagNames.join(","));
}
return [].concat.apply([], tagNames.map(function(tag) {
var collection = node.getElementsByTagName(tag);
return Array.isArray(collection) ? collection : Array.from(collection);
}));
},
/**
* Removes the class="" attribute from every element in the given
* subtree, except those that match CLASSES_TO_PRESERVE and
* the classesToPreserve array from the options object.
*
* @param Element
* @return void
*/
_cleanClasses: function(node) {
var classesToPreserve = this._classesToPreserve;
var className = (node.getAttribute("class") || "")
.split(/\s+/)
.filter(function(cls) {
return classesToPreserve.indexOf(cls) != -1;
})
.join(" ");
if (className) {
node.setAttribute("class", className);
} else {
node.removeAttribute("class");
}
for (node = node.firstElementChild; node; node = node.nextElementSibling) {
this._cleanClasses(node);
}
},
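// Illustrative example: with classesToPreserve = ["page"], an element with
// class="page sidebar promo" keeps class="page", while one with
// class="sidebar promo" has its class attribute removed entirely.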
/**
* Converts each <a> and <img> uri in the given element to an absolute URI,
* ignoring #ref URIs.
*
* @param Element
* @return void
*/
_fixRelativeUris: function(articleContent) {
var baseURI = this._doc.baseURI;
var documentURI = this._doc.documentURI;
function toAbsoluteURI(uri) {
// Leave hash links alone if the base URI matches the document URI:
if (baseURI == documentURI && uri.charAt(0) == "#") {
return uri;
}
// Otherwise, resolve against base URI:
try {
return new URL(uri, baseURI).href;
} catch (ex) {
// Something went wrong, just return the original:
}
return uri;
}
var links = this._getAllNodesWithTag(articleContent, ["a"]);
this._forEachNode(links, function(link) {
var href = link.getAttribute("href");
if (href) {
// Remove links with javascript: URIs, since
// they won't work after scripts have been removed from the page.
if (href.indexOf("javascript:") === 0) {
// if the link only contains simple text content, it can be converted to a text node
if (link.childNodes.length === 1 && link.childNodes[0].nodeType === this.TEXT_NODE) {
var text = this._doc.createTextNode(link.textContent);
link.parentNode.replaceChild(text, link);
} else {
// if the link has multiple children, they should all be preserved
var container = this._doc.createElement("span");
while (link.firstChild) {
container.appendChild(link.firstChild);
}
link.parentNode.replaceChild(container, link);
}
} else {
link.setAttribute("href", toAbsoluteURI(href));
}
}
});
var medias = this._getAllNodesWithTag(articleContent, [
"img", "picture", "figure", "video", "audio", "source"
]);
this._forEachNode(medias, function(media) {
var src = media.getAttribute("src");
var poster = media.getAttribute("poster");
var srcset = media.getAttribute("srcset");
if (src) {
media.setAttribute("src", toAbsoluteURI(src));
}
if (poster) {
media.setAttribute("poster", toAbsoluteURI(poster));
}
if (srcset) {
var newSrcset = srcset.replace(this.REGEXPS.srcsetUrl, function(_, p1, p2, p3) {
return toAbsoluteURI(p1) + (p2 || "") + p3;
});
media.setAttribute("srcset", newSrcset);
}
});
},
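// Illustrative example: with baseURI "https://example.com/posts/", an
// href="img/cat.png" becomes "https://example.com/posts/img/cat.png",
// while a hash link like "#fn-1" is left untouched whenever
// baseURI === documentURI.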
_simplifyNestedElements: function(articleContent) {
var node = articleContent;
while (node) {
if (node.parentNode && ["DIV", "SECTION"].includes(node.tagName) && !(node.id && node.id.startsWith("readability"))) {
if (this._isElementWithoutContent(node)) {
node = this._removeAndGetNext(node);
continue;
} else if (this._hasSingleTagInsideElement(node, "DIV") || this._hasSingleTagInsideElement(node, "SECTION")) {
var child = node.children[0];
for (var i = 0; i < node.attributes.length; i++) {
child.setAttribute(node.attributes[i].name, node.attributes[i].value);
}
node.parentNode.replaceChild(child, node);
node = child;
continue;
}
}
node = this._getNextNode(node);
}
},
/**
* Get the article title as an H1.
*
* @return string
**/
_getArticleTitle: function() {
var doc = this._doc;
var curTitle = "";
var origTitle = "";
try {
curTitle = origTitle = doc.title.trim();
// If they had an element with id "title" in their HTML
if (typeof curTitle !== "string")
curTitle = origTitle = this._getInnerText(doc.getElementsByTagName("title")[0]);
} catch (e) {/* ignore exceptions setting the title. */}
var titleHadHierarchicalSeparators = false;
function wordCount(str) {
return str.split(/\s+/).length;
}
// If there's a separator in the title, first remove the final part
if ((/ [\|\-\\\/>»] /).test(curTitle)) {
titleHadHierarchicalSeparators = / [\\\/>»] /.test(curTitle);
curTitle = origTitle.replace(/(.*)[\|\-\\\/>»] .*/gi, "$1");
// If the resulting title is too short (3 words or fewer), remove
// the first part instead:
if (wordCount(curTitle) < 3)
curTitle = origTitle.replace(/[^\|\-\\\/>»]*[\|\-\\\/>»](.*)/gi, "$1");
} else if (curTitle.indexOf(": ") !== -1) {
// Check if we have a heading containing this exact string, so we
// could assume it's the full title.
var headings = this._concatNodeLists(
doc.getElementsByTagName("h1"),
doc.getElementsByTagName("h2")
);
var trimmedTitle = curTitle.trim();
var match = this._someNode(headings, function(heading) {
return heading.textContent.trim() === trimmedTitle;
});
// If we don't, let's extract the title out of the original title string.
if (!match) {
curTitle = origTitle.substring(origTitle.lastIndexOf(":") + 1);
// If the title is now too short, try the first colon instead:
if (wordCount(curTitle) < 3) {
curTitle = origTitle.substring(origTitle.indexOf(":") + 1);
// But if we have too many words before the colon there's something weird
// with the titles and the H tags so let's just use the original title instead
} else if (wordCount(origTitle.substr(0, origTitle.indexOf(":"))) > 5) {
curTitle = origTitle;
}
}
} else if (curTitle.length > 150 || curTitle.length < 15) {
var hOnes = doc.getElementsByTagName("h1");
if (hOnes.length === 1)
curTitle = this._getInnerText(hOnes[0]);
}
curTitle = curTitle.trim().replace(this.REGEXPS.normalize, " ");
// If we now have 4 words or fewer as our title, and either no
// 'hierarchical' separators (\, /, > or ») were found in the original
// title or we decreased the number of words by more than 1 word, use
// the original title.
var curTitleWordCount = wordCount(curTitle);
if (curTitleWordCount <= 4 &&
(!titleHadHierarchicalSeparators ||
curTitleWordCount != wordCount(origTitle.replace(/[\|\-\\\/>»]+/g, "")) - 1)) {
curTitle = origTitle;
}
return curTitle;
},
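// Illustrative example: document.title = "How Fast Compilers Really Work |
// Example News" yields "How Fast Compilers Really Work" (5 words survive the
// separator strip); had 4 words or fewer survived, the final check above
// would usually fall back to the original title.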
/**
* Prepare the HTML document for readability to scrape it.
* This includes things like stripping javascript, CSS, and handling terrible markup.
*
* @return void
**/
_prepDocument: function() {
var doc = this._doc;
// Remove all style tags in head
this._removeNodes(this._getAllNodesWithTag(doc, ["style"]));
if (doc.body) {
this._replaceBrs(doc.body);
}
this._replaceNodeTags(this._getAllNodesWithTag(doc, ["font"]), "SPAN");
},
/**
* Finds the next node, starting from the given node, and ignoring
* whitespace in between. If the given node is an element, the same node is
* returned.
*/
_nextNode: function (node) {
var next = node;
while (next
&& (next.nodeType != this.ELEMENT_NODE)
&& this.REGEXPS.whitespace.test(next.textContent)) {
next = next.nextSibling;
}
return next;
},
/**
* Replaces 2 or more successive <br> elements with a single <p>.
* Whitespace between <br> elements is ignored. For example:
* <div>foo<br>bar<br> <br><br>abc</div>
* will become:
* <div>foo<br>bar<p>abc</p></div>
*/
_replaceBrs: function (elem) {
this._forEachNode(this._getAllNodesWithTag(elem, ["br"]), function(br) {
var next = br.nextSibling;
// Whether 2 or more <br> elements have been found and replaced with a
// <p> block.
var replaced = false;
// If we find a <br> chain, remove the <br>s until we hit another node
// or non-whitespace. This leaves behind the first <br> in the chain
// (which will be replaced with a <p> later).
while ((next = this._nextNode(next)) && (next.tagName == "BR")) {
replaced = true;
var brSibling = next.nextSibling;
next.parentNode.removeChild(next);
next = brSibling;
}
// If we removed a <br> chain, replace the remaining <br> with a <p>. Add
// all sibling nodes as children of the <p> until we hit another <br>
// chain.
if (replaced) {
var p = this._doc.createElement("p");
br.parentNode.replaceChild(p, br);
next = p.nextSibling;
while (next) {
// If we've hit another <br><br>, we're done adding children to this <p>.
if (next.tagName == "BR") {
var nextElem = this._nextNode(next.nextSibling);
if (nextElem && nextElem.tagName == "BR")
break;
}
if (!this._isPhrasingContent(next))
break;
// Otherwise, make this node a child of the new <p>.
var sibling = next.nextSibling;
p.appendChild(next);
next = sibling;
}
while (p.lastChild && this._isWhitespace(p.lastChild)) {
p.removeChild(p.lastChild);
}
if (p.parentNode.tagName === "P")
this._setNodeTag(p.parentNode, "DIV");
}
});
},
_setNodeTag: function (node, tag) {
this.log("_setNodeTag", node, tag);
if (this._docJSDOMParser) {
node.localName = tag.toLowerCase();
node.tagName = tag.toUpperCase();
return node;
}
var replacement = node.ownerDocument.createElement(tag);
while (node.firstChild) {
replacement.appendChild(node.firstChild);
}
node.parentNode.replaceChild(replacement, node);
if (node.readability)
replacement.readability = node.readability;
for (var i = 0; i < node.attributes.length; i++) {
try {
replacement.setAttribute(node.attributes[i].name, node.attributes[i].value);
} catch (ex) {
/* it's possible for setAttribute() to throw if the attribute name
* isn't a valid XML Name. Such attributes can however be parsed from
* source in HTML docs, see https://github.com/whatwg/html/issues/4275,
* so we can hit them here and then throw. We don't care about such
* attributes so we ignore them.
*/
}
}
return replacement;
},
/**
* Prepare the article node for display. Clean out any inline styles,
* iframes, forms, strip extraneous <p> tags, etc.
*
* @param Element
* @return void
**/
_prepArticle: function(articleContent) {
this._cleanStyles(articleContent);
// Check for data tables before we continue, to avoid removing items in
// those tables, which will often be isolated even though they're
// visually linked to other content-ful elements (text, images, etc.).
this._markDataTables(articleContent);
this._fixLazyImages(articleContent);
// Clean out junk from the article content
this._cleanConditionally(articleContent, "form");
this._cleanConditionally(articleContent, "fieldset");
this._clean(articleContent, "object");
this._clean(articleContent, "embed");
this._clean(articleContent, "footer");
this._clean(articleContent, "link");
this._clean(articleContent, "aside");
// Clean out elements with little content that have "share" in their id/class combinations from the final top candidates,
// which means we don't remove the top candidates themselves even if they have "share".
var shareElementThreshold = this.DEFAULT_CHAR_THRESHOLD;
this._forEachNode(articleContent.children, function (topCandidate) {
this._cleanMatchedNodes(topCandidate, function (node, matchString) {
return this.REGEXPS.shareElements.test(matchString) && node.textContent.length < shareElementThreshold;
});
});
this._clean(articleContent, "iframe");
this._clean(articleContent, "input");
this._clean(articleContent, "textarea");
this._clean(articleContent, "select");
this._clean(articleContent, "button");
this._cleanHeaders(articleContent);
// Do these last as the previous stuff may have removed junk
// that will affect these
this._cleanConditionally(articleContent, "table");
this._cleanConditionally(articleContent, "ul");
this._cleanConditionally(articleContent, "div");
// replace H1 with H2 as H1 should be only title that is displayed separately
this._replaceNodeTags(this._getAllNodesWithTag(articleContent, ["h1"]), "h2");
// Remove extra paragraphs
this._removeNodes(this._getAllNodesWithTag(articleContent, ["p"]), function (paragraph) {
var imgCount = paragraph.getElementsByTagName("img").length;
var embedCount = paragraph.getElementsByTagName("embed").length;
var objectCount = paragraph.getElementsByTagName("object").length;
// At this point, nasty iframes have been removed; only embedded-video iframes remain.
var iframeCount = paragraph.getElementsByTagName("iframe").length;
var totalCount = imgCount + embedCount + objectCount + iframeCount;
return totalCount === 0 && !this._getInnerText(paragraph, false);
});
this._forEachNode(this._getAllNodesWithTag(articleContent, ["br"]), function(br) {
var next = this._nextNode(br.nextSibling);
if (next && next.tagName == "P")
br.parentNode.removeChild(br);
});
// Remove single-cell tables
this._forEachNode(this._getAllNodesWithTag(articleContent, ["table"]), function(table) {
var tbody = this._hasSingleTagInsideElement(table, "TBODY") ? table.firstElementChild : table;
if (this._hasSingleTagInsideElement(tbody, "TR")) {
var row = tbody.firstElementChild;
if (this._hasSingleTagInsideElement(row, "TD")) {
var cell = row.firstElementChild;
cell = this._setNodeTag(cell, this._everyNode(cell.childNodes, this._isPhrasingContent) ? "P" : "DIV");
table.parentNode.replaceChild(cell, table);
}
}
});
},
/**
* Initialize a node with the readability object. Also checks the
* className/id for special names to add to its score.
*
* @param Element
* @return void
**/
_initializeNode: function(node) {
node.readability = {"contentScore": 0};
switch (node.tagName) {
case "DIV":
node.readability.contentScore += 5;
break;
case "PRE":
case "TD":
case "BLOCKQUOTE":
node.readability.contentScore += 3;
break;
case "ADDRESS":
case "OL":
case "UL":
case "DL":
case "DD":
case "DT":
case "LI":
case "FORM":
node.readability.contentScore -= 3;
break;
case "H1":
case "H2":
case "H3":
case "H4":
case "H5":
case "H6":
case "TH":
node.readability.contentScore -= 5;
break;
}
node.readability.contentScore += this._getClassWeight(node);
},
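// Illustrative example: a <div class="article-body"> starts at
// contentScore = +5 for its tag; _getClassWeight (defined later in this
// file) then adjusts the score up or down based on positive/negative
// class-name and id patterns such as "article" or "sidebar".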
_removeAndGetNext: function(node) {
var nextNode = this._getNextNode(node, true);
node.parentNode.removeChild(node);
return nextNode;
},
/**
* Traverse the DOM from node to node, starting at the node passed in.
* Pass true for the second parameter to indicate this node itself
* (and its kids) are going away, and we want the next node over.
*
* Calling this in a loop will traverse the DOM depth-first.
*/
_getNextNode: function(node, ignoreSelfAndKids) {
// First check for kids if those aren't being ignored
if (!ignoreSelfAndKids && node.firstElementChild) {
return node.firstElementChild;
}
// Then for siblings...
if (node.nextElementSibling) {
return node.nextElementSibling;
}
// And finally, move up the parent chain *and* find a sibling
// (because this is depth-first traversal, we will have already
// seen the parent nodes themselves).
do {
node = node.parentNode;
} while (node && !node.nextElementSibling);
return node && node.nextElementSibling;
},
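// Illustrative example: over <body><div><p>a</p></div><ul><li>b</li></ul>,
// repeated calls visit body -> div -> p -> ul -> li (depth-first, elements
// only), and _getNextNode(div, true) skips div's subtree and returns ul.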
// Compares the second text to the first one.
// 1 = same text, 0 = completely different text.
// Works by splitting both texts into words and finding the words that are
// unique to the second text; the score is 1 minus the relative length of
// those unique parts.
_textSimilarity: function(textA, textB) {
var tokensA = textA.toLowerCase().split(this.REGEXPS.tokenize).filter(Boolean);
var tokensB = textB.toLowerCase().split(this.REGEXPS.tokenize).filter(Boolean);
if (!tokensA.length || !tokensB.length) {
return 0;
}
var uniqTokensB = tokensB.filter(token => !tokensA.includes(token));
var distanceB = uniqTokensB.join(" ").length / tokensB.join(" ").length;
return 1 - distanceB;
},
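// Worked example: textA = "the quick brown fox", textB = "the quick red fox".
// uniqTokensB = ["red"], so distanceB = "red".length / "the quick red fox".length
// = 3 / 17, giving a similarity of 1 - 3/17 ≈ 0.82 (above the 0.75 threshold
// _getJSONLD uses when matching "name"/"headline" against the page title).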
_checkByline: function(node, matchString) {
if (this._articleByline) {
return false;
}
if (node.getAttribute !== undefined) {
var rel = node.getAttribute("rel");
var itemprop = node.getAttribute("itemprop");
}
if ((rel === "author" || (itemprop && itemprop.indexOf("author") !== -1) || this.REGEXPS.byline.test(matchString)) && this._isValidByline(node.textContent)) {
this._articleByline = node.textContent.trim();
return true;
}
return false;
},
_getNodeAncestors: function(node, maxDepth) {
maxDepth = maxDepth || 0;
var i = 0, ancestors = [];
while (node.parentNode) {
ancestors.push(node.parentNode);
if (maxDepth && ++i === maxDepth)
break;
node = node.parentNode;
}
return ancestors;
},
/***
* grabArticle - Using a variety of metrics (content score, classname, element types), find the content that is
* most likely to be the stuff a user wants to read. Then return it wrapped up in a div.
*
* @param page a document to run upon. Needs to be a full document, complete with body.
* @return Element
**/
_grabArticle: function (page) {
this.log("**** grabArticle ****");
var doc = this._doc;
var isPaging = page !== null;
page = page ? page : this._doc.body;
// We can't grab an article if we don't have a page!
if (!page) {
this.log("No body found in document. Abort.");
return null;
}
var pageCacheHtml = page.innerHTML;
while (true) {
this.log("Starting grabArticle loop");
var stripUnlikelyCandidates = this._flagIsActive(this.FLAG_STRIP_UNLIKELYS);
// First, node prepping. Trash nodes that look cruddy (like ones with the
// class name "comment", etc), and turn divs into P tags where they have been
// used inappropriately (as in, where they contain no other block level elements.)
var elementsToScore = [];
var node = this._doc.documentElement;
let shouldRemoveTitleHeader = true;
while (node) {
if (node.tagName === "HTML") {
this._articleLang = node.getAttribute("lang");
}
var matchString = node.className + " " + node.id;
if (!this._isProbablyVisible(node)) {
this.log("Removing hidden node - " + matchString);
node = this._removeAndGetNext(node);
continue;
}
// Users cannot see elements that have both aria-modal="true" and role="dialog" applied.
if (node.getAttribute("aria-modal") == "true" && node.getAttribute("role") == "dialog") {
node = this._removeAndGetNext(node);
continue;
}
// Check to see if this node is a byline, and remove it if it is.
if (this._checkByline(node, matchString)) {
node = this._removeAndGetNext(node);
continue;
}
if (shouldRemoveTitleHeader && this._headerDuplicatesTitle(node)) {
this.log("Removing header: ", node.textContent.trim(), this._articleTitle.trim());
shouldRemoveTitleHeader = false;
node = this._removeAndGetNext(node);
continue;
}
// Remove unlikely candidates
if (stripUnlikelyCandidates) {
if (this.REGEXPS.unlikelyCandidates.test(matchString) &&
!this.REGEXPS.okMaybeItsACandidate.test(matchString) &&
!this._hasAncestorTag(node, "table") &&
!this._hasAncestorTag(node, "code") &&
node.tagName !== "BODY" &&
node.tagName !== "A") {
this.log("Removing unlikely candidate - " + matchString);
node = this._removeAndGetNext(node);
continue;
}
if (this.UNLIKELY_ROLES.includes(node.getAttribute("role"))) {
this.log("Removing content with role " + node.getAttribute("role") + " - " + matchString);
node = this._removeAndGetNext(node);
continue;
}
}
// Remove DIV, SECTION, and HEADER nodes without any content (e.g. text, image, video, or iframe).
if ((node.tagName === "DIV" || node.tagName === "SECTION" || node.tagName === "HEADER" ||
node.tagName === "H1" || node.tagName === "H2" || node.tagName === "H3" ||
node.tagName === "H4" || node.tagName === "H5" || node.tagName === "H6") &&
this._isElementWithoutContent(node)) {
node = this._removeAndGetNext(node);
continue;
}
if (this.DEFAULT_TAGS_TO_SCORE.indexOf(node.tagName) !== -1) {
elementsToScore.push(node);
}
// Turn all divs that don't have children block level elements into p's
if (node.tagName === "DIV") {
// Put phrasing content into paragraphs.
var p = null;
var childNode = node.firstChild;
while (childNode) {
var nextSibling = childNode.nextSibling;
if (this._isPhrasingContent(childNode)) {
if (p !== null) {
p.appendChild(childNode);
} else if (!this._isWhitespace(childNode)) {
p = doc.createElement("p");
node.replaceChild(p, childNode);
p.appendChild(childNode);
}
} else if (p !== null) {
while (p.lastChild && this._isWhitespace(p.lastChild)) {
p.removeChild(p.lastChild);
}
p = null;
}
childNode = nextSibling;
}
// Sites like http://mobile.slate.com enclose each paragraph with a DIV
// element. DIVs with only a P element inside and no text content can be
// safely converted into plain P elements to avoid confusing the scoring
// algorithm with DIVs which are, in practice, paragraphs.
if (this._hasSingleTagInsideElement(node, "P") && this._getLinkDensity(node) < 0.25) {
var newNode = node.children[0];
node.parentNode.replaceChild(newNode, node);
node = newNode;
elementsToScore.push(node);
} else if (!this._hasChildBlockElement(node)) {
node = this._setNodeTag(node, "P");
elementsToScore.push(node);
}
}
node = this._getNextNode(node);
}
/**
* Loop through all paragraphs, and assign a score to them based on how content-y they look.
* Then add their score to their parent node.
*
* A score is determined by things like number of commas, class names, etc. Maybe eventually link density.
**/
var candidates = [];
this._forEachNode(elementsToScore, function(elementToScore) {
if (!elementToScore.parentNode || typeof(elementToScore.parentNode.tagName) === "undefined")
return;
// If this paragraph is less than 25 characters, don't even count it.
var innerText = this._getInnerText(elementToScore);
if (innerText.length < 25)
return;
// Exclude nodes with no ancestor.
var ancestors = this._getNodeAncestors(elementToScore, 5);
if (ancestors.length === 0)
return;
var contentScore = 0;
// Add a point for the paragraph itself as a base.
contentScore += 1;
// Add points for any commas within this paragraph.
contentScore += innerText.split(this.REGEXPS.commas).length;
// For every 100 characters in this paragraph, add another point. Up to 3 points.
contentScore += Math.min(Math.floor(innerText.length / 100), 3);
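// e.g. a 250-character paragraph containing 3 commas scores
// 1 (base) + 4 (splitting on commas yields 4 segments) + 2 (floor(250/100))
// = 7 points before being distributed to its ancestors below.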
// Initialize and score ancestors.
this._forEachNode(ancestors, function(ancestor, level) {
if (!ancestor.tagName || !ancestor.parentNode || typeof(ancestor.parentNode.tagName) === "undefined")
return;
if (typeof(ancestor.readability) === "undefined") {
this._initializeNode(ancestor);
candidates.push(ancestor);
}
// Node score divider:
// - parent: 1 (no division)
// - grandparent: 2
// - great grandparent+: ancestor level * 3
if (level === 0)
var scoreDivider = 1;
else if (level === 1)
scoreDivider = 2;
else
scoreDivider = level * 3;
ancestor.readability.contentScore += contentScore / scoreDivider;
});
});
// After we've calculated scores, loop through all of the possible
// candidate nodes we found and find the one with the highest score.
var topCandidates = [];
for (var c = 0, cl = candidates.length; c < cl; c += 1) {
var candidate = candidates[c];
// Scale the final candidates score based on link density. Good content
// should have a relatively small link density (5% or less) and be mostly
// unaffected by this operation.
var candidateScore = candidate.readability.contentScore * (1 - this._getLinkDensity(candidate));
candidate.readability.contentScore = candidateScore;
this.log("Candidate:", candidate, "with score " + candidateScore);
for (var t = 0; t < this._nbTopCandidates; t++) {
var aTopCandidate = topCandidates[t];
if (!aTopCandidate || candidateScore > aTopCandidate.readability.contentScore) {
topCandidates.splice(t, 0, candidate);
if (topCandidates.length > this._nbTopCandidates)
topCandidates.pop();
break;
}
}
}
var topCandidate = topCandidates[0] || null;
var neededToCreateTopCandidate = false;
var parentOfTopCandidate;
// If we still have no top candidate, just use the body as a last resort.
// We also have to copy the body node so it is something we can modify.
if (topCandidate === null || topCandidate.tagName === "BODY") {
// Move all of the page's children into topCandidate
topCandidate = doc.createElement("DIV");
neededToCreateTopCandidate = true;
// Move everything (not just elements, also text nodes etc.) into the container
// so we even include text directly in the body:
while (page.firstChild) {
this.log("Moving child out:", page.firstChild);
topCandidate.appendChild(page.firstChild);
}
page.appendChild(topCandidate);
this._initializeNode(topCandidate);
} else if (topCandidate) {
// Find a better top candidate node if it contains (at least three) nodes which belong to the `topCandidates` array
// and whose scores are quite close to the current `topCandidate` node's score.
var alternativeCandidateAncestors = [];
for (var i = 1; i < topCandidates.length; i++) {
if (topCandidates[i].readability.contentScore / topCandidate.readability.contentScore >= 0.75) {
alternativeCandidateAncestors.push(this._getNodeAncestors(topCandidates[i]));
}
}
var MINIMUM_TOPCANDIDATES = 3;
if (alternativeCandidateAncestors.length >= MINIMUM_TOPCANDIDATES) {
parentOfTopCandidate = topCandidate.parentNode;
while (parentOfTopCandidate.tagName !== "BODY") {
var listsContainingThisAncestor = 0;
for (var ancestorIndex = 0; ancestorIndex < alternativeCandidateAncestors.length && listsContainingThisAncestor < MINIMUM_TOPCANDIDATES; ancestorIndex++) {
listsContainingThisAncestor += Number(alternativeCandidateAncestors[ancestorIndex].includes(parentOfTopCandidate));
}
if (listsContainingThisAncestor >= MINIMUM_TOPCANDIDATES) {
topCandidate = parentOfTopCandidate;
break;
}
parentOfTopCandidate = parentOfTopCandidate.parentNode;
}
}
if (!topCandidate.readability) {
this._initializeNode(topCandidate);
}
// Because of our bonus system, parents of candidates might have scores
// themselves. They get half of the node. There won't be nodes with higher
// scores than our topCandidate, but if we see the score going *up* in the first
// few steps up the tree, that's a decent sign that there might be more content
// lurking in other places that we want to unify in. The sibling stuff
// below does some of that - but only if we've looked high enough up the DOM
// tree.
parentOfTopCandidate = topCandidate.parentNode;
var lastScore = topCandidate.readability.contentScore;
// The scores shouldn't get too low.
var scoreThreshold = lastScore / 3;
while (parentOfTopCandidate.tagName !== "BODY") {
if (!parentOfTopCandidate.readability) {
parentOfTopCandidate = parentOfTopCandidate.parentNode;
continue;
}
var parentScore = parentOfTopCandidate.readability.contentScore;
if (parentScore < scoreThreshold)
break;
if (parentScore > lastScore) {
// Alright! We found a better parent to use.
topCandidate = parentOfTopCandidate;
break;
}
lastScore = parentOfTopCandidate.readability.contentScore;
parentOfTopCandidate = parentOfTopCandidate.parentNode;
}
// If the top candidate is the only child, use parent instead. This will help sibling
// joining logic when adjacent content is actually located in parent's sibling node.
parentOfTopCandidate = topCandidate.parentNode;
while (parentOfTopCandidate.tagName != "BODY" && parentOfTopCandidate.children.length == 1) {
topCandidate = parentOfTopCandidate;
parentOfTopCandidate = topCandidate.parentNode;
}
if (!topCandidate.readability) {
this._initializeNode(topCandidate);
}
}
// Now that we have the top candidate, look through its siblings for content
// that might also be related. Things like preambles, content split by ads
// that we removed, etc.
var articleContent = doc.createElement("DIV");
if (isPaging)
articleContent.id = "readability-content";
var siblingScoreThreshold = Math.max(10, topCandidate.readability.contentScore * 0.2);
// Keep potential top candidate's parent node to try to get text direction of it later.
parentOfTopCandidate = topCandidate.parentNode;
var siblings = parentOfTopCandidate.children;
for (var s = 0, sl = siblings.length; s < sl; s++) {
var sibling = siblings[s];
var append = false;
this.log("Looking at sibling node:", sibling, sibling.readability ? ("with score " + sibling.readability.contentScore) : "");
this.log("Sibling has score", sibling.readability ? sibling.readability.contentScore : "Unknown");
if (sibling === topCandidate) {
append = true;
} else {
var contentBonus = 0;
// Give a bonus if sibling nodes and top candidates have the exact same classname
if (sibling.className === topCandidate.className && topCandidate.className !== "")
contentBonus += topCandidate.readability.contentScore * 0.2;
if (sibling.readability &&
((sibling.readability.contentScore + contentBonus) >= siblingScoreThreshold)) {
append = true;
} else if (sibling.nodeName === "P") {
var linkDensity = this._getLinkDensity(sibling);
var nodeContent = this._getInnerText(sibling);
var nodeLength = nodeContent.length;
if (nodeLength > 80 && linkDensity < 0.25) {
append = true;
} else if (nodeLength < 80 && nodeLength > 0 && linkDensity === 0 &&
nodeContent.search(/\.( |$)/) !== -1) {
append = true;
}
}
}
if (append) {
this.log("Appending node:", sibling);
if (this.ALTER_TO_DIV_EXCEPTIONS.indexOf(sibling.nodeName) === -1) {
// We have a node that isn't a common block level element, like a form or td tag.
// Turn it into a div so it doesn't get filtered out later by accident.
this.log("Altering sibling:", sibling, "to div.");
sibling = this._setNodeTag(sibling, "DIV");
}
articleContent.appendChild(sibling);
// Fetch children again to make it compatible
// with DOM parsers without live collection support.
siblings = parentOfTopCandidate.children;
// siblings is a reference to the children array, and
// sibling is removed from the array when we call appendChild().
// As a result, we must revisit this index since the nodes
// have been shifted.
s -= 1;
sl -= 1;
}
}
if (this._debug)
this.log("Article content pre-prep: " + articleContent.innerHTML);
// So we have all of the content that we need. Now we clean it up for presentation.
this._prepArticle(articleContent);
if (this._debug)
this.log("Article content post-prep: " + articleContent.innerHTML);
if (neededToCreateTopCandidate) {
// We already created a fake div thing, and there wouldn't have been any siblings left
// for the previous loop, so there's no point trying to create a new div, and then
// move all the children over. Just assign IDs and class names here. No need to append
// because that already happened anyway.
topCandidate.id = "readability-page-1";
topCandidate.className = "page";
} else {
var div = doc.createElement("DIV");
div.id = "readability-page-1";
div.className = "page";
while (articleContent.firstChild) {
div.appendChild(articleContent.firstChild);
}
articleContent.appendChild(div);
}
if (this._debug)
this.log("Article content after paging: " + articleContent.innerHTML);
var parseSuccessful = true;
// Now that we've gone through the full algorithm, check to see if
// we got any meaningful content. If we didn't, we may need to re-run
// grabArticle with different flags set. This gives us a higher likelihood of
// finding the content, and the sieve approach gives us a higher likelihood of
// finding the -right- content.
var textLength = this._getInnerText(articleContent, true).length;
if (textLength < this._charThreshold) {
parseSuccessful = false;
page.innerHTML = pageCacheHtml;
if (this._flagIsActive(this.FLAG_STRIP_UNLIKELYS)) {
this._removeFlag(this.FLAG_STRIP_UNLIKELYS);
this._attempts.push({articleContent: articleContent, textLength: textLength});
} else if (this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) {
this._removeFlag(this.FLAG_WEIGHT_CLASSES);
this._attempts.push({articleContent: articleContent, textLength: textLength});
} else if (this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) {
this._removeFlag(this.FLAG_CLEAN_CONDITIONALLY);
this._attempts.push({articleContent: articleContent, textLength: textLength});
} else {
this._attempts.push({articleContent: articleContent, textLength: textLength});
// No luck after removing flags, just return the longest text we found during the different loops
this._attempts.sort(function (a, b) {
return b.textLength - a.textLength;
});
// But first check if we actually have something
if (!this._attempts[0].textLength) {
return null;
}
articleContent = this._attempts[0].articleContent;
parseSuccessful = true;
}
}
if (parseSuccessful) {
// Find out text direction from ancestors of final top candidate.
var ancestors = [parentOfTopCandidate, topCandidate].concat(this._getNodeAncestors(parentOfTopCandidate));
this._someNode(ancestors, function(ancestor) {
if (!ancestor.tagName)
return false;
var articleDir = ancestor.getAttribute("dir");
if (articleDir) {
this._articleDir = articleDir;
return true;
}
return false;
});
return articleContent;
}
}
},
/**
* Check whether the input string could be a byline.
* This verifies that the input is a string, and that the length
* is less than 100 chars.
*
* @param possibleByline {string} - a string to check whether it's a byline.
* @return Boolean - whether the input string is a byline.
*/
_isValidByline: function(byline) {
if (typeof byline == "string" || byline instanceof String) {
byline = byline.trim();
return (byline.length > 0) && (byline.length < 100);
}
return false;
},
/**
* Converts some of the common HTML entities in string to their corresponding characters.
*
* @param str {string} - a string to unescape.
* @return string without HTML entity.
*/
_unescapeHtmlEntities: function(str) {
if (!str) {
return str;
}
var htmlEscapeMap = this.HTML_ESCAPE_MAP;
return str.replace(/&(quot|amp|apos|lt|gt);/g, function(_, tag) {
return htmlEscapeMap[tag];
}).replace(/&#(?:x([0-9a-z]{1,4})|([0-9]{1,4}));/gi, function(_, hex, numStr) {
var num = parseInt(hex || numStr, hex ? 16 : 10);
return String.fromCharCode(num);
});
},
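// Illustrative example: "&lt;b&gt;Caf&#233;&lt;/b&gt;" becomes "<b>Café</b>",
// handling both the named entities in HTML_ESCAPE_MAP and numeric
// (decimal or hex) character references.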
/**
* Try to extract metadata from JSON-LD object.
* For now, only Schema.org objects of type Article or its subtypes are supported.
* @return Object with any metadata that could be extracted (possibly none)
*/
_getJSONLD: function (doc) {
var scripts = this._getAllNodesWithTag(doc, ["script"]);
var metadata;
this._forEachNode(scripts, function(jsonLdElement) {
if (!metadata && jsonLdElement.getAttribute("type") === "application/ld+json") {
try {
// Strip CDATA markers if present
var content = jsonLdElement.textContent.replace(/^\s*<!\[CDATA\[|\]\]>\s*$/g, "");
var parsed = JSON.parse(content);
if (
!parsed["@context"] ||
!parsed["@context"].match(/^https?\:\/\/schema\.org$/)
) {
return;
}
if (!parsed["@type"] && Array.isArray(parsed["@graph"])) {
parsed = parsed["@graph"].find(function(it) {
return (it["@type"] || "").match(
this.REGEXPS.jsonLdArticleTypes
);
});
}
if (
!parsed ||
!parsed["@type"] ||
!parsed["@type"].match(this.REGEXPS.jsonLdArticleTypes)
) {
return;
}
metadata = {};
if (typeof parsed.name === "string" && typeof parsed.headline === "string" && parsed.name !== parsed.headline) {
// we have both name and headline elements in the JSON-LD. They should both be the same, but some websites like aktualne.cz
// put their own name into "name" and the article title into "headline", which confuses Readability. So we check whether either
// "name" or "headline" closely matches the html title, and if so, use that one. If not, then we use "name" by default.
var title = this._getArticleTitle();
var nameMatches = this._textSimilarity(parsed.name, title) > 0.75;
var headlineMatches = this._textSimilarity(parsed.headline, title) > 0.75;
if (headlineMatches && !nameMatches) {
metadata.title = parsed.headline;
} else {
metadata.title = parsed.name;
}
} else if (typeof parsed.name === "string") {
metadata.title = parsed.name.trim();
} else if (typeof parsed.headline === "string") {
metadata.title = parsed.headline.trim();
}
if (parsed.author) {
if (typeof parsed.author.name === "string") {
metadata.byline = parsed.author.name.trim();
} else if (Array.isArray(parsed.author) && parsed.author[0] && typeof parsed.author[0].name === "string") {
metadata.byline = parsed.author
.filter(function(author) {
return author && typeof author.name === "string";
})
.map(function(author) {
return author.name.trim();
})
.join(", ");
}
}
if (typeof parsed.description === "string") {
metadata.excerpt = parsed.description.trim();
}
if (
parsed.publisher &&
typeof parsed.publisher.name === "string"
) {
metadata.siteName = parsed.publisher.name.trim();
}
if (typeof parsed.datePublished === "string") {
metadata.datePublished = parsed.datePublished.trim();
}
return;
} catch (err) {
this.log(err.message);
}
}
});
return metadata ? metadata : {};
},
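// Illustrative example (names made up for illustration): a tag such as
//   <script type="application/ld+json">
//     {"@context": "https://schema.org", "@type": "NewsArticle",
//      "headline": "Widget Ships", "author": {"name": "A. Author"},
//      "datePublished": "2021-01-01"}
//   </script>
// would yield { title: "Widget Ships", byline: "A. Author",
// datePublished: "2021-01-01" }.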
/**
* Attempts to get excerpt and byline metadata for the article.
*
* @param {Object} jsonld — object containing any metadata that
* could be extracted from JSON-LD object.
*
* @return Object with optional "excerpt" and "byline" properties
*/
_getArticleMetadata: function(jsonld) {
var metadata = {};
var values = {};
var metaElements = this._doc.getElementsByTagName("meta");
// property is a space-separated list of values
var propertyPattern = /\s*(article|dc|dcterm|og|twitter)\s*:\s*(author|creator|description|published_time|title|site_name)\s*/gi;
// name is a single value
var namePattern = /^\s*(?:(dc|dcterm|og|twitter|weibo:(article|webpage))\s*[\.:]\s*)?(author|creator|description|title|site_name)\s*$/i;
// Find description tags.
this._forEachNode(metaElements, function(element) {
var elementName = element.getAttribute("name");
var elementProperty = element.getAttribute("property");
var content = element.getAttribute("content");
if (!content) {
return;
}
var matches = null;
var name = null;
if (elementProperty) {
matches = elementProperty.match(propertyPattern);
if (matches) {
// Convert to lowercase, and remove any whitespace
// so we can match below.
name = matches[0].toLowerCase().replace(/\s/g, "");
// multiple authors
values[name] = content.trim();
}
}
if (!matches && elementName && namePattern.test(elementName)) {
name = elementName;
if (content) {
// Convert to lowercase, remove any whitespace, and convert dots
// to colons so we can match below.
name = name.toLowerCase().replace(/\s/g, "").replace(/\./g, ":");
values[name] = content.trim();
}
}
});
// get title
metadata.title = jsonld.title ||
values["dc:title"] ||
values["dcterm:title"] ||
values["og:title"] ||
values["weibo:article:title"] ||
values["weibo:webpage:title"] ||
values["title"] ||
values["twitter:title"];
if (!metadata.title) {
metadata.title = this._getArticleTitle();
}
// get author
metadata.byline = jsonld.byline ||
values["dc:creator"] ||
values["dcterm:creator"] ||
values["author"];
// get description
metadata.excerpt = jsonld.excerpt ||
values["dc:description"] ||
values["dcterm:description"] ||
values["og:description"] ||
values["weibo:article:description"] ||
values["weibo:webpage:description"] ||
values["description"] ||
values["twitter:description"];
// get site name
metadata.siteName = jsonld.siteName ||
values["og:site_name"];
// get article published time
metadata.publishedTime = jsonld.datePublished ||
values["article:published_time"] || null;
// in many sites the meta value is escaped with HTML entities,
// so here we need to unescape it
metadata.title = this._unescapeHtmlEntities(metadata.title);
metadata.byline = this._unescapeHtmlEntities(metadata.byline);
metadata.excerpt = this._unescapeHtmlEntities(metadata.excerpt);
metadata.siteName = this._unescapeHtmlEntities(metadata.siteName);
metadata.publishedTime = this._unescapeHtmlEntities(metadata.publishedTime);
return metadata;
},
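// Illustrative example: <meta property="og:title" content="Widget Ships">
// lands in values["og:title"], which becomes metadata.title only when
// JSON-LD supplied no title and neither dc:title nor dcterm:title exists.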
/**
* Check if the node is an image, or if the node contains exactly one image,
* whether as a direct child or as a descendant.
*
* @param Element
**/
_isSingleImage: function(node) {
if (node.tagName === "IMG") {
return true;
}
if (node.children.length !== 1 || node.textContent.trim() !== "") {
return false;
}
return this._isSingleImage(node.children[0]);
},
/**
* Find all <noscript> that are located after <img> nodes, and which contain only one
* <img> element. Replace the first image with the image from inside the <noscript> tag,
* and remove the <noscript> tag. This improves the quality of the images we use on
* some sites (e.g. Medium).
*
* @param Element
**/
_unwrapNoscriptImages: function(doc) {
// Find imgs without a source or any attribute that might contain an image, and remove them.
// This prevents a placeholder img from being replaced by the img from the noscript in the next step.
var imgs = Array.from(doc.getElementsByTagName("img"));
this._forEachNode(imgs, function(img) {
for (var i = 0; i < img.attributes.length; i++) {
var attr = img.attributes[i];
switch (attr.name) {
case "src":
case "srcset":
case "data-src":
case "data-srcset":
return;
}
if (/\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
return;
}
}
img.parentNode.removeChild(img);
});
// Next find noscript and try to extract its image
var noscripts = Array.from(doc.getElementsByTagName("noscript"));
this._forEachNode(noscripts, function(noscript) {
// Parse the content of the noscript and make sure it contains only a single image
var tmp = doc.createElement("div");
tmp.innerHTML = noscript.innerHTML;
if (!this._isSingleImage(tmp)) {
return;
}
// If the noscript has a previous sibling that contains only an image,
// replace that sibling with the noscript content. We also keep any old
// attributes that might contain an image.
var prevElement = noscript.previousElementSibling;
if (prevElement && this._isSingleImage(prevElement)) {
var prevImg = prevElement;
if (prevImg.tagName !== "IMG") {
prevImg = prevElement.getElementsByTagName("img")[0];
}
var newImg = tmp.getElementsByTagName("img")[0];
for (var i = 0; i < prevImg.attributes.length; i++) {
var attr = prevImg.attributes[i];
if (attr.value === "") {
continue;
}
if (attr.name === "src" || attr.name === "srcset" || /\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
if (newImg.getAttribute(attr.name) === attr.value) {
continue;
}
var attrName = attr.name;
if (newImg.hasAttribute(attrName)) {
attrName = "data-old-" + attrName;
}
newImg.setAttribute(attrName, attr.value);
}
}
noscript.parentNode.replaceChild(tmp.firstElementChild, prevElement);
}
});
},
/**
* Removes script tags from the document.
*
* @param Element
**/
_removeScripts: function(doc) {
this._removeNodes(this._getAllNodesWithTag(doc, ["script", "noscript"]));
},
/**
* Check if this node contains only whitespace and a single element with the given tag.
* Returns false if the node contains non-empty text nodes,
* or if it contains no element with the given tag, or more than one such element.
*
* @param Element
* @param string tag of child element
**/
_hasSingleTagInsideElement: function(element, tag) {
// There should be exactly 1 element child with given tag
if (element.children.length != 1 || element.children[0].tagName !== tag) {
return false;
}
// And there should be no text nodes with real content
return !this._someNode(element.childNodes, function(node) {
return node.nodeType === this.TEXT_NODE &&
this.REGEXPS.hasContent.test(node.textContent);
});
},
_isElementWithoutContent: function(node) {
return node.nodeType === this.ELEMENT_NODE &&
node.textContent.trim().length == 0 &&
(node.children.length == 0 ||
node.children.length == node.getElementsByTagName("br").length + node.getElementsByTagName("hr").length);
},
/**
* Determine whether element has any children block level elements.
*
* @param Element
*/
_hasChildBlockElement: function (element) {
return this._someNode(element.childNodes, function(node) {
return this.DIV_TO_P_ELEMS.has(node.tagName) ||
this._hasChildBlockElement(node);
});
},
/***
* Determine if a node qualifies as phrasing content.
* https://developer.mozilla.org/en-US/docs/Web/Guide/HTML/Content_categories#Phrasing_content
**/
_isPhrasingContent: function(node) {
return node.nodeType === this.TEXT_NODE || this.PHRASING_ELEMS.indexOf(node.tagName) !== -1 ||
((node.tagName === "A" || node.tagName === "DEL" || node.tagName === "INS") &&
this._everyNode(node.childNodes, this._isPhrasingContent));
},
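// Illustrative note (added for clarity, not upstream code): under this rule a
// bare text node is phrasing content, elements listed in PHRASING_ELEMS
// (e.g. <b>, <span>) are phrasing content, and an <a>, <del>, or <ins> counts
// only if everything inside it is also phrasing content — so
// <a><span>x</span></a> qualifies, while <a><div>x</div></a> does not.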
_isWhitespace: function(node) {
return (node.nodeType === this.TEXT_NODE && node.textContent.trim().length === 0) ||
(node.nodeType === this.ELEMENT_NODE && node.tagName === "BR");
},
/**
* Get the inner text of a node - cross browser compatibly.
* This also strips out any excess whitespace to be found.
*
* @param Element
* @param Boolean normalizeSpaces (default: true)
* @return string
**/
_getInnerText: function(e, normalizeSpaces) {
normalizeSpaces = (typeof normalizeSpaces === "undefined") ? true : normalizeSpaces;
var textContent = e.textContent.trim();
if (normalizeSpaces) {
return textContent.replace(this.REGEXPS.normalize, " ");
}
return textContent;
},
/**
* Get the number of times a string s appears in the node e.
*
* @param Element
* @param string - what to split on. Default is ","
* @return number (integer)
**/
_getCharCount: function(e, s) {
s = s || ",";
return this._getInnerText(e).split(s).length - 1;
},
/**
* Remove the style attribute on every e and under.
* TODO: Test if getElementsByTagName(*) is faster.
*
* @param Element
* @return void
**/
_cleanStyles: function(e) {
if (!e || e.tagName.toLowerCase() === "svg")
return;
// Remove `style` and deprecated presentational attributes
for (var i = 0; i < this.PRESENTATIONAL_ATTRIBUTES.length; i++) {
e.removeAttribute(this.PRESENTATIONAL_ATTRIBUTES[i]);
}
if (this.DEPRECATED_SIZE_ATTRIBUTE_ELEMS.indexOf(e.tagName) !== -1) {
e.removeAttribute("width");
e.removeAttribute("height");
}
var cur = e.firstElementChild;
while (cur !== null) {
this._cleanStyles(cur);
cur = cur.nextElementSibling;
}
},
/**
* Get the density of links as a percentage of the content
* This is the amount of text that is inside a link divided by the total text in the node.
*
* @param Element
* @return number (float)
**/
_getLinkDensity: function(element) {
var textLength = this._getInnerText(element).length;
if (textLength === 0)
return 0;
var linkLength = 0;
// XXX implement _reduceNodeList?
this._forEachNode(element.getElementsByTagName("a"), function(linkNode) {
var href = linkNode.getAttribute("href");
var coefficient = href && this.REGEXPS.hashUrl.test(href) ? 0.3 : 1;
linkLength += this._getInnerText(linkNode).length * coefficient;
});
return linkLength / textLength;
},
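// Worked example (added for clarity, not upstream code): for an element whose
// total inner text is 200 chars, containing one ordinary link of 40 chars and
// one in-page hash link (matching REGEXPS.hashUrl) of 20 chars, the density is
// (40 * 1 + 20 * 0.3) / 200 = 0.23. Hash-only links are discounted because
// they usually point within the same page.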
/**
* Get an elements class/id weight. Uses regular expressions to tell if this
* element looks good or bad.
*
* @param Element
* @return number (Integer)
**/
_getClassWeight: function(e) {
if (!this._flagIsActive(this.FLAG_WEIGHT_CLASSES))
return 0;
var weight = 0;
// Look for a special classname
if (typeof(e.className) === "string" && e.className !== "") {
if (this.REGEXPS.negative.test(e.className))
weight -= 25;
if (this.REGEXPS.positive.test(e.className))
weight += 25;
}
// Look for a special ID
if (typeof(e.id) === "string" && e.id !== "") {
if (this.REGEXPS.negative.test(e.id))
weight -= 25;
if (this.REGEXPS.positive.test(e.id))
weight += 25;
}
return weight;
},
/**
* Clean a node of all elements of type "tag".
* (Unless it's a youtube/vimeo video. People love movies.)
*
* @param Element
* @param string tag to clean
* @return void
**/
_clean: function(e, tag) {
var isEmbed = ["object", "embed", "iframe"].indexOf(tag) !== -1;
this._removeNodes(this._getAllNodesWithTag(e, [tag]), function(element) {
// Allow youtube and vimeo videos through as people usually want to see those.
if (isEmbed) {
// First, check the elements attributes to see if any of them contain youtube or vimeo
for (var i = 0; i < element.attributes.length; i++) {
if (this._allowedVideoRegex.test(element.attributes[i].value)) {
return false;
}
}
// For embed with <object> tag, check inner HTML as well.
if (element.tagName === "object" && this._allowedVideoRegex.test(element.innerHTML)) {
return false;
}
}
return true;
});
},
/**
* Check if a given node has one of its ancestor tag name matching the
* provided one.
* @param HTMLElement node
* @param String tagName
* @param Number maxDepth
* @param Function filterFn a filter to invoke to determine whether this node 'counts'
* @return Boolean
*/
_hasAncestorTag: function(node, tagName, maxDepth, filterFn) {
maxDepth = maxDepth || 3;
tagName = tagName.toUpperCase();
var depth = 0;
while (node.parentNode) {
if (maxDepth > 0 && depth > maxDepth)
return false;
if (node.parentNode.tagName === tagName && (!filterFn || filterFn(node.parentNode)))
return true;
node = node.parentNode;
depth++;
}
return false;
},
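// Usage note (illustrative, not upstream code): maxDepth defaults to 3 when
// falsy, and passing a negative value (e.g. -1, as _cleanConditionally does
// for its data-table check) disables the depth limit entirely, since the
// `maxDepth > 0` guard then never fires.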
/**
* Return an object indicating how many rows and columns this table has.
*/
_getRowAndColumnCount: function(table) {
var rows = 0;
var columns = 0;
var trs = table.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
var rowspan = trs[i].getAttribute("rowspan") || 0;
if (rowspan) {
rowspan = parseInt(rowspan, 10);
}
rows += (rowspan || 1);
// Now look for column-related info
var columnsInThisRow = 0;
var cells = trs[i].getElementsByTagName("td");
for (var j = 0; j < cells.length; j++) {
var colspan = cells[j].getAttribute("colspan") || 0;
if (colspan) {
colspan = parseInt(colspan, 10);
}
columnsInThisRow += (colspan || 1);
}
columns = Math.max(columns, columnsInThisRow);
}
return {rows: rows, columns: columns};
},
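// Illustrative example (added for clarity, not upstream code): note that
// rowspan is read from the <tr> element itself here. For
//   <tr rowspan="2"><td></td><td></td></tr><tr><td colspan="3"></td></tr>
// the result is {rows: 3, columns: 3}: 2 + 1 rows, and max(2, 3) columns.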
/**
* Look for 'data' (as opposed to 'layout') tables, for which we use
* similar checks as
* https://searchfox.org/mozilla-central/rev/f82d5c549f046cb64ce5602bfd894b7ae807c8f8/accessible/generic/TableAccessible.cpp#19
*/
_markDataTables: function(root) {
var tables = root.getElementsByTagName("table");
for (var i = 0; i < tables.length; i++) {
var table = tables[i];
var role = table.getAttribute("role");
if (role == "presentation") {
table._readabilityDataTable = false;
continue;
}
var datatable = table.getAttribute("datatable");
if (datatable == "0") {
table._readabilityDataTable = false;
continue;
}
var summary = table.getAttribute("summary");
if (summary) {
table._readabilityDataTable = true;
continue;
}
var caption = table.getElementsByTagName("caption")[0];
if (caption && caption.childNodes.length > 0) {
table._readabilityDataTable = true;
continue;
}
// If the table has a descendant with any of these tags, consider a data table:
var dataTableDescendants = ["col", "colgroup", "tfoot", "thead", "th"];
var descendantExists = function(tag) {
return !!table.getElementsByTagName(tag)[0];
};
if (dataTableDescendants.some(descendantExists)) {
this.log("Data table because found data-y descendant");
table._readabilityDataTable = true;
continue;
}
// Nested tables indicate a layout table:
if (table.getElementsByTagName("table")[0]) {
table._readabilityDataTable = false;
continue;
}
var sizeInfo = this._getRowAndColumnCount(table);
if (sizeInfo.rows >= 10 || sizeInfo.columns > 4) {
table._readabilityDataTable = true;
continue;
}
// Now just go by size entirely:
table._readabilityDataTable = sizeInfo.rows * sizeInfo.columns > 10;
}
},
/* convert images and figures that have properties like data-src into images that can be loaded without JS */
_fixLazyImages: function (root) {
this._forEachNode(this._getAllNodesWithTag(root, ["img", "picture", "figure"]), function (elem) {
// Some sites (e.g. Kotaku) put a 1px square image as a base64 data URI in the
// src attribute. So here we check whether the data URI is too short and, if so,
// remove it, since it is almost certainly a placeholder.
if (elem.src && this.REGEXPS.b64DataUrl.test(elem.src)) {
// Make sure it's not SVG, because SVG can have a meaningful image in under 133 bytes.
var parts = this.REGEXPS.b64DataUrl.exec(elem.src);
if (parts[1] === "image/svg+xml") {
return;
}
// Make sure this element has other attributes that might contain an image.
// If it doesn't, then this src is important and shouldn't be removed.
var srcCouldBeRemoved = false;
for (var i = 0; i < elem.attributes.length; i++) {
var attr = elem.attributes[i];
if (attr.name === "src") {
continue;
}
if (/\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
srcCouldBeRemoved = true;
break;
}
}
// Here we assume that an image smaller than 100 bytes (133 characters once
// base64-encoded, since base64 turns 3 bytes into 4 characters) is too small
// to be meaningful and is therefore likely a placeholder image.
if (srcCouldBeRemoved) {
var b64starts = elem.src.search(/base64\s*/i) + 7;
var b64length = elem.src.length - b64starts;
if (b64length < 133) {
elem.removeAttribute("src");
}
}
}
// also check for "null" to work around https://github.com/jsdom/jsdom/issues/2580
if ((elem.src || (elem.srcset && elem.srcset != "null")) && elem.className.toLowerCase().indexOf("lazy") === -1) {
return;
}
for (var j = 0; j < elem.attributes.length; j++) {
attr = elem.attributes[j];
if (attr.name === "src" || attr.name === "srcset" || attr.name === "alt") {
continue;
}
var copyTo = null;
if (/\.(jpg|jpeg|png|webp)\s+\d/.test(attr.value)) {
copyTo = "srcset";
} else if (/^\s*\S+\.(jpg|jpeg|png|webp)\S*\s*$/.test(attr.value)) {
copyTo = "src";
}
if (copyTo) {
//if this is an img or picture, set the attribute directly
if (elem.tagName === "IMG" || elem.tagName === "PICTURE") {
elem.setAttribute(copyTo, attr.value);
} else if (elem.tagName === "FIGURE" && !this._getAllNodesWithTag(elem, ["img", "picture"]).length) {
//if the item is a <figure> that does not contain an image or picture, create one and place it inside the figure
//see the nytimes-3 testcase for an example
var img = this._doc.createElement("img");
img.setAttribute(copyTo, attr.value);
elem.appendChild(img);
}
}
}
});
},
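// Illustrative example (added for clarity, not upstream code): given a lazily
// loaded image such as
//   <img class="lazy" data-src="photo.jpg" data-srcset="photo.jpg 2x">
// with no real src, the loop above copies data-src into src (it matches the
// single-URL pattern) and data-srcset into srcset (it matches the
// "url width/density" pattern), so the image loads without JavaScript.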
_getTextDensity: function(e, tags) {
var textLength = this._getInnerText(e, true).length;
if (textLength === 0) {
return 0;
}
var childrenLength = 0;
var children = this._getAllNodesWithTag(e, tags);
this._forEachNode(children, (child) => childrenLength += this._getInnerText(child, true).length);
return childrenLength / textLength;
},
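// Illustrative example (added for clarity, not upstream code): for a node
// whose inner text is 100 chars, of which h1–h6 descendants contribute 10,
// the density is 10 / 100 = 0.1. _cleanConditionally uses this as
// headingDensity below.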
/**
* Clean an element of all tags of type "tag" if they look fishy.
* "Fishy" is an algorithm based on content length, classnames, link density, number of images & embeds, etc.
*
* @return void
**/
_cleanConditionally: function(e, tag) {
if (!this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY))
return;
// Gather counts for other typical elements embedded within.
// Traverse backwards so we can remove nodes at the same time
// without affecting the traversal.
//
// TODO: Consider taking into account original contentScore here.
this._removeNodes(this._getAllNodesWithTag(e, [tag]), function(node) {
// First check if this node IS a data table, in which case don't remove it.
var isDataTable = function(t) {
return t._readabilityDataTable;
};
var isList = tag === "ul" || tag === "ol";
if (!isList) {
var listLength = 0;
var listNodes = this._getAllNodesWithTag(node, ["ul", "ol"]);
this._forEachNode(listNodes, (list) => listLength += this._getInnerText(list).length);
isList = listLength / this._getInnerText(node).length > 0.9;
}
if (tag === "table" && isDataTable(node)) {
return false;
}
// Next check if we're inside a data table, in which case don't remove it either.
if (this._hasAncestorTag(node, "table", -1, isDataTable)) {
return false;
}
if (this._hasAncestorTag(node, "code")) {
return false;
}
var weight = this._getClassWeight(node);
this.log("Cleaning Conditionally", node);
var contentScore = 0;
if (weight + contentScore < 0) {
return true;
}
if (this._getCharCount(node, ",") < 10) {
// If there are not very many commas, and the number of
// non-paragraph elements outweighs the paragraphs, or there are other
// ominous signs, remove the element.
var p = node.getElementsByTagName("p").length;
var img = node.getElementsByTagName("img").length;
var li = node.getElementsByTagName("li").length - 100; // offset so that small li counts never trigger the li > p check below
var input = node.getElementsByTagName("input").length;
var headingDensity = this._getTextDensity(node, ["h1", "h2", "h3", "h4", "h5", "h6"]);
var embedCount = 0;
var embeds = this._getAllNodesWithTag(node, ["object", "embed", "iframe"]);
for (var i = 0; i < embeds.length; i++) {
// If this embed has attribute that matches video regex, don't delete it.
for (var j = 0; j < embeds[i].attributes.length; j++) {
if (this._allowedVideoRegex.test(embeds[i].attributes[j].value)) {
return false;
}
}
// For embed with <object> tag, check inner HTML as well.
if (embeds[i].tagName === "object" && this._allowedVideoRegex.test(embeds[i].innerHTML)) {
return false;
}
embedCount++;
}
var linkDensity = this._getLinkDensity(node);
var contentLength = this._getInnerText(node).length;
var haveToRemove =
(img > 1 && p / img < 0.5 && !this._hasAncestorTag(node, "figure")) ||
(!isList && li > p) ||
(input > Math.floor(p/3)) ||
(!isList && headingDensity < 0.9 && contentLength < 25 && (img === 0 || img > 2) && !this._hasAncestorTag(node, "figure")) ||
(!isList && weight < 25 && linkDensity > 0.2) ||
(weight >= 25 && linkDensity > 0.5) ||
((embedCount === 1 && contentLength < 75) || embedCount > 1);
// Allow simple lists of images to remain in pages
if (isList && haveToRemove) {
for (var x = 0; x < node.children.length; x++) {
let child = node.children[x];
// Don't filter in lists with li's that contain more than one child
if (child.children.length > 1) {
return haveToRemove;
}
}
let li_count = node.getElementsByTagName("li").length;
// Only allow the list to remain if every li contains an image
if (img == li_count) {
return false;
}
}
return haveToRemove;
}
return false;
});
},
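// Worked example (added for clarity, not upstream code): a <div> with fewer
// than 10 commas containing 3 images, 1 paragraph, and no <figure> ancestor
// hits the first haveToRemove clause (img > 1 && p / img = 0.33 < 0.5) and is
// removed; the same images wrapped in a <figure> survive.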
/**
* Clean out elements that match the specified conditions
*
* @param Element
* @param Function determines whether a node should be removed
* @return void
**/
_cleanMatchedNodes: function(e, filter) {
var endOfSearchMarkerNode = this._getNextNode(e, true);
var next = this._getNextNode(e);
while (next && next != endOfSearchMarkerNode) {
if (filter.call(this, next, next.className + " " + next.id)) {
next = this._removeAndGetNext(next);
} else {
next = this._getNextNode(next);
}
}
},
/**
* Clean out spurious headers from an Element.
*
* @param Element
* @return void
**/
_cleanHeaders: function(e) {
let headingNodes = this._getAllNodesWithTag(e, ["h1", "h2"]);
this._removeNodes(headingNodes, function(node) {
let shouldRemove = this._getClassWeight(node) < 0;
if (shouldRemove) {
this.log("Removing header with low class weight:", node);
}
return shouldRemove;
});
},
/**
* Check if this node is an H1 or H2 element whose content is mostly
* the same as the article title.
*
* @param Element the node to check.
* @return boolean indicating whether this is a title-like header.
*/
_headerDuplicatesTitle: function(node) {
if (node.tagName != "H1" && node.tagName != "H2") {
return false;
}
var heading = this._getInnerText(node, false);
this.log("Evaluating similarity of header:", heading, this._articleTitle);
return this._textSimilarity(this._articleTitle, heading) > 0.75;
},
_flagIsActive: function(flag) {
return (this._flags & flag) > 0;
},
_removeFlag: function(flag) {
this._flags = this._flags & ~flag;
},
_isProbablyVisible: function(node) {
// Have to null-check node.style and node.className.indexOf to deal with SVG and MathML nodes.
return (!node.style || node.style.display != "none")
&& (!node.style || node.style.visibility != "hidden")
&& !node.hasAttribute("hidden")
//check for "fallback-image" so that wikimedia math images are displayed
&& (!node.hasAttribute("aria-hidden") || node.getAttribute("aria-hidden") != "true" || (node.className && node.className.indexOf && node.className.indexOf("fallback-image") !== -1));
},
/**
* Runs readability.
*
* Workflow:
* 1. Prep the document by removing script tags, css, etc.
* 2. Build readability's DOM tree.
* 3. Grab the article content from the current dom tree.
* 4. Replace the current DOM tree with the new one.
* 5. Read peacefully.
*
* @return void
**/
parse: function () {
// Avoid parsing too large documents, as per configuration option
if (this._maxElemsToParse > 0) {
var numTags = this._doc.getElementsByTagName("*").length;
if (numTags > this._maxElemsToParse) {
throw new Error("Aborting parsing document; " + numTags + " elements found");
}
}
// Unwrap image from noscript
this._unwrapNoscriptImages(this._doc);
// Extract JSON-LD metadata before removing scripts
var jsonLd = this._disableJSONLD ? {} : this._getJSONLD(this._doc);
// Remove script tags from the document.
this._removeScripts(this._doc);
this._prepDocument();
var metadata = this._getArticleMetadata(jsonLd);
this._articleTitle = metadata.title;
var articleContent = this._grabArticle();
if (!articleContent)
return null;
this.log("Grabbed: " + articleContent.innerHTML);
this._postProcessContent(articleContent);
// If we haven't found an excerpt in the article's metadata, use the article's
// first paragraph as the excerpt. This is used for displaying a preview of
// the article's content.
if (!metadata.excerpt) {
var paragraphs = articleContent.getElementsByTagName("p");
if (paragraphs.length > 0) {
metadata.excerpt = paragraphs[0].textContent.trim();
}
}
var textContent = articleContent.textContent;
return {
title: this._articleTitle,
byline: metadata.byline || this._articleByline,
dir: this._articleDir,
lang: this._articleLang,
content: this._serializer(articleContent),
textContent: textContent,
length: textContent.length,
excerpt: metadata.excerpt,
siteName: metadata.siteName || this._articleSiteName,
publishedTime: metadata.publishedTime
};
}
};
if (typeof module === "object") {
/* global module */
module.exports = Readability;
}
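// Minimal usage sketch (illustrative; mirrors Mozilla's documented API).
// Readability mutates the DOM it is given, so a clone is usually passed in:
//   var reader = new Readability(document.cloneNode(true));
//   var article = reader.parse();
//   if (article) { console.log(article.title, article.byline, article.excerpt); }
// parse() returns null when no article content could be grabbed.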
```
--------------------------------------------------------------------------------
/examples/document_conversion_and_processing_demo.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python
"""
DETAILED Demonstration script for the STANDALONE Document Processing functions
in Ultimate MCP Server, showcasing integrated OCR, analysis, conversion, and batch capabilities
with extensive examples.
"""
import asyncio
import base64
import datetime as dt
import json
import os
import sys
import traceback # Added for more detailed error printing if needed
import warnings # Added for warning control
from pathlib import Path
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple
import httpx
# Filter Docling-related deprecation warnings
warnings.filterwarnings("ignore", category=DeprecationWarning, module="docling")
warnings.filterwarnings("ignore", category=DeprecationWarning, module="docling_core")
warnings.filterwarnings("ignore", message="Could not parse formula with MathML")
# Add project root to path for imports when running as script
# Adjust this relative path if your script structure is different
_PROJECT_ROOT = Path(__file__).resolve().parent.parent
if str(_PROJECT_ROOT) not in sys.path:
sys.path.insert(0, str(_PROJECT_ROOT))
print(f"INFO: Added {_PROJECT_ROOT} to sys.path")
# Rich imports for enhanced terminal UI
from rich import box, get_console # noqa: E402
from rich.console import Group # noqa: E402
from rich.layout import Layout # noqa: E402
from rich.markdown import Markdown # noqa: E402
from rich.markup import escape # noqa: E402
from rich.panel import Panel # noqa: E402
from rich.progress import ( # noqa: E402
BarColumn,
FileSizeColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from rich.rule import Rule # noqa: E402
from rich.syntax import Syntax # noqa: E402
from rich.table import Table # noqa: E402
from rich.text import Text # noqa: E402
from rich.traceback import install as install_rich_traceback # noqa: E402
# --- Global Constants ---
# Maximum number of lines to display for any content
MAX_DISPLAY_LINES = 50 # Used to truncate all displayed content
# --- Attempt to import required MCP Server components ---
try:
# Assuming standard MCP Server structure
from ultimate_mcp_server.core.server import Gateway
from ultimate_mcp_server.exceptions import ToolError, ToolInputError
# Import the standalone functions and availability flags
from ultimate_mcp_server.tools.document_conversion_and_processing import (
# Import availability flags
_DOCLING_AVAILABLE,
_PANDAS_AVAILABLE,
_TIKTOKEN_AVAILABLE,
analyze_pdf_structure,
canonicalise_entities,
chunk_document,
clean_and_format_text_as_markdown,
convert_document,
detect_content_type,
enhance_ocr_text,
extract_entities,
extract_metrics,
extract_tables,
flag_risks,
generate_qa_pairs,
identify_sections,
ocr_image,
optimize_markdown_formatting,
process_document_batch,
summarize_document,
)
from ultimate_mcp_server.utils import get_logger
from ultimate_mcp_server.utils.display import CostTracker # Import CostTracker
MCP_COMPONENTS_LOADED = True
except ImportError as e:
MCP_COMPONENTS_LOADED = False
_IMPORT_ERROR_MSG = str(e)
# Handle this error gracefully in the main function
print(f"\n[ERROR] Failed to import required MCP components: {_IMPORT_ERROR_MSG}")
print("Please ensure:")
print("1. You are running this script from the correct directory structure.")
print("2. The MCP Server environment is activated.")
print("3. All dependencies (including optional ones used in the demo) are installed.")
sys.exit(1)
# Initialize Rich console and logger
console = get_console()
logger = get_logger("demo.doc_proc_standalone") # Updated logger name
# Install rich tracebacks for better error display
install_rich_traceback(show_locals=True, width=console.width, extra_lines=2)
# --- Configuration ---
SCRIPT_DIR = Path(__file__).resolve().parent
DEFAULT_SAMPLE_DIR = SCRIPT_DIR / "sample_docs" # Changed dir name slightly
DEFAULT_SAMPLE_PDF_URL = "https://arxiv.org/pdf/1706.03762.pdf" # Attention is All You Need
DEFAULT_SAMPLE_IMAGE_URL = "https://raw.githubusercontent.com/tesseract-ocr/tesseract/main/testing/phototest.tif" # Use Tesseract sample TIFF
SAMPLE_HTML_URL = "https://en.wikipedia.org/wiki/Transformer_(machine_learning_model)"
# Additional sample PDFs for testing diversity
BUFFETT_SHAREHOLDER_LETTER_URL = "https://www.berkshirehathaway.com/letters/2022ltr.pdf" # Likely digital PDF, good for text/layout
BACKPROPAGATION_PAPER_URL = "https://www.iro.umontreal.ca/~vincentp/ift3395/lectures/backprop_old.pdf" # Older, might be scanned/need OCR
DOWNLOADED_FILES_DIR = DEFAULT_SAMPLE_DIR / "downloaded"
# Config from environment variables
USE_GPU = os.environ.get("USE_GPU", "true").lower() == "true"
MAX_CONCURRENT_TASKS = int(os.environ.get("MAX_CONCURRENT_TASKS", "3"))
ACCELERATOR_DEVICE = "cuda" if USE_GPU else "cpu"
SKIP_DOWNLOADS = os.environ.get("SKIP_DOWNLOADS", "false").lower() == "true"
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO").upper()
# Define result types for type hints
ResultData = Dict[str, Any]
OperationResult = Tuple[bool, ResultData]
FileResult = Optional[Path]
# --- Demo Helper Functions (Mostly unchanged, minor adjustments for clarity) ---
def create_demo_layout() -> Layout:
"""Create a Rich layout for the demo UI."""
layout = Layout(name="root")
layout.split(
Layout(name="header", size=5),
Layout(name="body", ratio=1),
Layout(name="footer", size=1),
)
layout["footer"].update("[dim]Standalone Document Processing Demo Footer[/]")
return layout
def timestamp_str(short: bool = False) -> str:
"""Return a formatted timestamp string."""
now = dt.datetime.now()
if short:
return f"[dim]{now.strftime('%H:%M:%S')}[/]"
return f"[dim]{now.strftime('%Y-%m-%d %H:%M:%S')}[/]"
def truncate_text_by_lines(text: str, max_lines: int = MAX_DISPLAY_LINES) -> str:
"""Truncates text to show first/last lines with indicator."""
if not text or not isinstance(text, str):
return ""
lines = text.splitlines()
if len(lines) <= max_lines:
return text
half_lines = max_lines // 2
return "\n".join(lines[:half_lines] + ["[dim][...TRUNCATED...]"] + lines[-half_lines:])
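# Example (illustrative): with max_lines=4, a 10-line text becomes lines 1-2,
# a "[...TRUNCATED...]" marker, then lines 9-10 (half_lines = 4 // 2 = 2).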
def format_value_for_display(key: str, value: Any, detail_level: int = 1) -> Any:
"""Format specific values for better display."""
if value is None:
return "[dim]None[/]"
if isinstance(value, bool):
return "[green]Yes[/]" if value else "[red]No[/]"
if isinstance(value, float):
# Specific formatting for processing_time
if "time" in key.lower() and not key.lower().startswith("creation"):
return f"[green]{value:.3f}s[/]"
return f"{value:.3f}" # Standard float formatting
if isinstance(value, list):
if not value:
return "[dim]Empty List[/]"
list_len = len(value)
preview_count = 3 if detail_level < 2 else 5
suffix = f" [dim]... ({list_len} items total)[/]" if list_len > preview_count else ""
if detail_level >= 1:
previews = []
for item in value[:preview_count]:
item_preview = format_value_for_display(f"{key}_item", item, detail_level=0)
previews.append(str(item_preview))
return f"[{', '.join(previews)}]{suffix}"
else:
return f"[List with {list_len} items]"
if isinstance(value, dict):
if not value:
return "[dim]Empty Dict[/]"
dict_len = len(value)
preview_count = 4 if detail_level < 2 else 8
preview_keys = list(value.keys())[:preview_count]
suffix = f" [dim]... ({dict_len} keys total)[/]" if dict_len > preview_count else ""
if detail_level >= 1:
items_preview = [
f"{repr(k)}: {format_value_for_display(k, value[k], detail_level=0)}"
for k in preview_keys
]
return f"{{{'; '.join(items_preview)}}}{suffix}"
else:
return f"[Dict with {dict_len} keys]"
if isinstance(value, str):
str_len = len(value)
# Always truncate by lines first for display consistency
truncated_by_lines = truncate_text_by_lines(value, MAX_DISPLAY_LINES)
# Then apply character limit if still too long
preview_len = 300 if detail_level < 2 else 600
if len(truncated_by_lines) > preview_len:
return escape(truncated_by_lines[:preview_len]) + f"[dim]... ({str_len} chars total)[/]"
return escape(truncated_by_lines)
return escape(str(value))
def display_result(title: str, result: ResultData, display_options: Optional[Dict] = None) -> None:
"""Display operation result with enhanced formatting using Rich."""
display_options = display_options or {}
start_time = dt.datetime.now()
title_display = Text.from_markup(escape(title)) if not isinstance(title, Text) else title
console.print(Rule(f"[bold cyan]{title_display}[/] {timestamp_str()}", style="cyan"))
success = result.get("success", False)
detail_level = display_options.get("detail_level", 1)
hide_keys_set = set(
display_options.get("hide_keys", ["success", "raw_llm_response", "raw_text"])
)
display_keys = display_options.get("display_keys")
# --- Summary Panel ---
summary_panel_content = Text()
summary_panel_content.append(
Text.from_markup(
f"Status: {'[bold green]Success[/]' if success else '[bold red]Failed[/]'}\n"
)
)
if not success:
error_code = result.get("error_code", "N/A")
error_msg = result.get("error", "Unknown error")
summary_panel_content.append(
Text.from_markup(f"Error Code: [yellow]{escape(str(error_code))}[/]\n")
)
summary_panel_content.append(
Text.from_markup(f"Message: [red]{escape(str(error_msg))}[/]\n")
)
console.print(
Panel(
summary_panel_content, title="Operation Status", border_style="red", padding=(1, 2)
)
)
return # Stop display if failed
top_level_info = {
"processing_time": "Processing Time",
"extraction_strategy_used": "Strategy Used",
"output_format": "Output Format",
"was_html": "Input Detected as HTML", # Relevant for clean_and_format...
"file_path": "Output File Path",
}
for key, display_name in top_level_info.items():
if key in result and key not in hide_keys_set:
value_str = format_value_for_display(key, result[key], detail_level=0)
summary_panel_content.append(
Text.from_markup(f"{display_name}: [blue]{value_str}[/]\n")
)
console.print(
Panel(
summary_panel_content, title="Operation Summary", border_style="green", padding=(1, 2)
)
)
# --- Details Section ---
details_to_display = {}
for key, value in result.items():
if (
key in hide_keys_set or key in top_level_info or key.startswith("_")
): # Skip internal keys
continue
if display_keys and key not in display_keys:
continue
details_to_display[key] = value
if not details_to_display:
console.print(Text.from_markup("[dim]No further details requested or available.[/]"))
console.print()
return
console.print(Rule("Details", style="dim"))
for key, value in details_to_display.items():
key_title = key.replace("_", " ").title()
panel_border = "blue"
panel_content: Any = None
format_type = "text"
# Determine format for content-like keys
is_content_key = key.lower() in [
"content",
"markdown_text",
"optimized_markdown",
"summary",
"first_table_preview",
"tables",
]
if is_content_key:
if "markdown" in key.lower() or result.get("output_format") == "markdown":
format_type = "markdown"
elif result.get("output_format") == "html":
format_type = "html"
elif (
result.get("output_format") == "json"
or key == "tables"
and result.get("tables")
and isinstance(result.get("tables")[0], list)
):
format_type = "json"
elif (
key == "tables"
and result.get("tables")
and isinstance(result.get("tables")[0], str)
): # Assuming CSV string
format_type = "csv"
else:
format_type = "text"
format_type = display_options.get("format_key", {}).get(
key, format_type
) # Allow override
if is_content_key and isinstance(value, str):
if not value:
panel_content = "[dim]Empty Content[/]"
else:
truncated_value = truncate_text_by_lines(value, MAX_DISPLAY_LINES)
if format_type == "markdown":
panel_content = Markdown(truncated_value)
elif format_type == "csv":
panel_content = Syntax(
truncated_value,
"csv",
theme="paraiso-dark",
line_numbers=False,
word_wrap=True,
)
else:
panel_content = Syntax(
truncated_value,
format_type,
theme="paraiso-dark",
line_numbers=False,
word_wrap=True,
)
panel_border = "green" if format_type == "markdown" else "white"
console.print(
Panel(
panel_content,
title=key_title,
border_style=panel_border,
padding=(1, 2),
expand=False,
)
)
elif key.lower() == "chunks" and isinstance(value, list):
chunk_table = Table(
title=f"Chunk Preview (Total: {len(value)})", box=box.MINIMAL, show_header=True
)
chunk_table.add_column("#", style="cyan")
chunk_table.add_column("Preview (First 80 chars)", style="white")
chunk_table.add_column("Length", style="green")
limit = 5 if detail_level < 2 else 10
for i, chunk in enumerate(value[:limit], 1):
chunk_str = str(chunk)
chunk_preview = truncate_text_by_lines(
chunk_str[:80] + ("..." if len(chunk_str) > 80 else ""), 5
)
chunk_table.add_row(str(i), escape(chunk_preview), str(len(chunk_str)))
if len(value) > limit:
chunk_table.add_row("...", f"[dim]{len(value) - limit} more...[/]", "")
console.print(Panel(chunk_table, title=key_title, border_style="blue"))
elif key.lower() == "qa_pairs" and isinstance(value, list):
qa_text = Text()
limit = 3 if detail_level < 2 else 5
for i, qa in enumerate(value[:limit], 1):
q_text = truncate_text_by_lines(qa.get("question", ""), 5)
a_text = truncate_text_by_lines(qa.get("answer", ""), 10)
qa_text.append(f"{i}. Q: ", style="bold cyan")
qa_text.append(escape(q_text) + "\n")
qa_text.append(" A: ", style="green")
qa_text.append(escape(a_text) + "\n\n")
if len(value) > limit:
qa_text.append(f"[dim]... {len(value) - limit} more ...[/]")
console.print(Panel(qa_text, title=key_title, border_style="blue"))
elif (
key.lower() == "tables" and isinstance(value, list) and value
): # Handle table list (JSON/Pandas)
first_table = value[0]
if isinstance(first_table, list): # JSON format
panel_content = Syntax(
json.dumps(first_table[:5], indent=2),
"json",
theme="paraiso-dark",
line_numbers=False,
word_wrap=True,
)
panel_title = f"{key_title} (First Table JSON Preview, {len(value)} total)"
console.print(
Panel(panel_content, title=panel_title, border_style="yellow", padding=(1, 1))
)
elif hasattr(first_table, "to_string"): # Pandas DataFrame
panel_content = escape(first_table.head(5).to_string())
panel_title = f"{key_title} (First Table Pandas Preview, {len(value)} total)"
console.print(
Panel(panel_content, title=panel_title, border_style="yellow", padding=(1, 1))
)
else: # Fallback if format unknown
console.print(
Panel(
f"First table type: {type(first_table).__name__}. Preview:\n{str(first_table)[:500]}...",
title=key_title,
border_style="yellow",
)
)
elif isinstance(value, dict): # General Dict Handling (metadata, metrics, risks, etc.)
dict_table = Table(title="Contents", box=box.MINIMAL, show_header=False, expand=False)
dict_table.add_column("SubKey", style="magenta", justify="right", no_wrap=True)
dict_table.add_column("SubValue", style="white")
item_count = 0
max_items = 5 if detail_level == 0 else 20
for k, v in value.items():
dict_table.add_row(
escape(str(k)), format_value_for_display(k, v, detail_level=detail_level)
)
item_count += 1
if item_count >= max_items:
dict_table.add_row("[dim]...[/]", f"[dim]({len(value)} total items)[/]")
break
panel_border = (
"magenta" if "quality" in key.lower() or "metrics" in key.lower() else "blue"
)
console.print(
Panel(dict_table, title=key_title, border_style=panel_border, padding=(1, 1))
)
elif isinstance(value, list): # General List Handling
list_panel_content = [Text.from_markup(f"[cyan]Total Items:[/] {len(value)}")]
limit = 5 if detail_level < 2 else 10
for i, item in enumerate(value[:limit]):
item_display = format_value_for_display(
f"{key}[{i}]", item, detail_level=detail_level - 1
)
list_panel_content.append(f"[magenta]{i + 1}.[/] {item_display}")
if len(value) > limit:
list_panel_content.append(
Text.from_markup(f"[dim]... {len(value) - limit} more ...[/]")
)
console.print(Panel(Group(*list_panel_content), title=key_title, border_style="blue"))
else: # Fallback for simple types
value_display = format_value_for_display(key, value, detail_level=detail_level)
console.print(f"[bold cyan]{key_title}:[/] {value_display}")
end_time = dt.datetime.now()
elapsed = (end_time - start_time).total_seconds()
console.print(Text.from_markup(f"[dim]Result details displayed in {elapsed:.3f}s[/]"))
console.print() # Add spacing
async def download_file_with_progress(
url: str, output_path: Path, description: str, progress: Optional[Progress] = None
) -> FileResult:
"""Download a file with a detailed progress bar."""
if output_path.exists() and output_path.stat().st_size > 1000:
logger.info(f"Using existing file: {output_path}")
console.print(
Text.from_markup(f"[dim]Using existing file: [blue underline]{output_path.name}[/][/]")
)
return output_path
if SKIP_DOWNLOADS:
console.print(
f"[yellow]Skipping download for {description} due to SKIP_DOWNLOADS setting.[/]"
)
return None
console.print(f"Attempting to download [bold]{description}[/] from [underline]{url}[/]...")
output_path.parent.mkdir(parents=True, exist_ok=True)
try:
async with httpx.AsyncClient(follow_redirects=True, timeout=60.0) as client:
async with client.stream("GET", url) as response:
if response.status_code == 404:
logger.error(f"File not found (404) at {url}")
console.print(f"[red]Error: File not found (404) for {description}.[/]")
return None
response.raise_for_status()
total_size = int(response.headers.get("content-length", 0))
task_description = f"Downloading {description}..."
local_progress = progress is None
if local_progress:
progress = Progress( # type: ignore
TextColumn("[bold blue]{task.description}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
TransferSpeedColumn(),
"•",
FileSizeColumn(),
"•",
TimeRemainingColumn(),
console=console,
transient=True,
)
progress.start() # type: ignore
download_task = progress.add_task(task_description, total=total_size) # type: ignore
bytes_downloaded = 0
try:
with open(output_path, "wb") as f:
async for chunk in response.aiter_bytes():
f.write(chunk)
bytes_written = len(chunk)
bytes_downloaded += bytes_written
progress.update(download_task, advance=bytes_written) # type: ignore
progress.update(
download_task,
completed=max(bytes_downloaded, total_size),
description=f"Downloaded {description}",
) # type: ignore
finally:
if local_progress:
progress.stop() # type: ignore
logger.info(f"Successfully downloaded {description} to {output_path}")
console.print(
Text.from_markup(
f"[green]✓ Downloaded {description} to [blue underline]{output_path.name}[/][/]"
)
)
return output_path
except httpx.RequestError as e:
logger.error(f"Network error downloading {description} from {url}: {e}")
console.print(
Text.from_markup(
f"[red]Network Error downloading {description}: {type(e).__name__}. Check connection or URL.[/]"
)
)
return None
except Exception as e:
logger.error(f"Failed to download {description} from {url}: {e}", exc_info=True)
console.print(
Text.from_markup(f"[red]Error downloading {description}: {type(e).__name__} - {e}[/]")
)
if output_path.exists():
try:
output_path.unlink()
except OSError:
pass
return None
async def safe_tool_call(
operation_name: str,
tool_func: Callable[..., Awaitable[Dict]],
*args,
tracker: Optional[CostTracker] = None,
**kwargs,
) -> OperationResult:
"""Safely call a standalone tool function, handling exceptions and logging."""
console.print(
Text.from_markup(
f"\n[cyan]Calling Tool:[/][bold] {escape(operation_name)}[/] {timestamp_str(short=True)}"
)
)
display_options = kwargs.pop("display_options", {}) # Extract display options
# Log arguments carefully
log_args_repr = {}
MAX_ARG_LEN = 100
for k, v in kwargs.items():
if k == "image_data" and isinstance(v, str): # Don't log full base64
log_args_repr[k] = f"str(len={len(v)}, starting_chars='{v[:10]}...')"
elif isinstance(v, (str, bytes)) and len(v) > MAX_ARG_LEN:
log_args_repr[k] = f"{type(v).__name__}(len={len(v)})"
elif isinstance(v, (list, dict)) and len(v) > 10:
log_args_repr[k] = f"{type(v).__name__}(len={len(v)})"
else:
log_args_repr[k] = repr(v)
logger.debug(f"Executing {operation_name} with kwargs: {log_args_repr}")
try:
# Directly call the standalone function
result = await tool_func(*args, **kwargs)
if not isinstance(result, dict):
logger.error(
f"Tool '{operation_name}' returned non-dict type: {type(result)}. Value: {str(result)[:150]}"
)
return False, {
"success": False,
"error": f"Tool returned unexpected type: {type(result).__name__}",
"error_code": "INTERNAL_ERROR",
"_display_options": display_options,
}
# Cost tracking (if applicable)
if tracker is not None and result.get("success", False):
# The standalone functions might not directly return cost info in the same way.
# If LLM calls happen internally, cost tracking might need to be done within
# the `_standalone_llm_call` or rely on the global tracker if `generate_completion` updates it.
# For now, assume cost is tracked elsewhere or add specific fields if needed.
if "llm_cost" in result or "cost" in result:
# Attempt to track cost if relevant fields exist
cost = result.get("cost", result.get("llm_cost", 0.0))
input_tokens = result.get("input_tokens", 0)
output_tokens = result.get("output_tokens", 0)
provider = result.get("provider", "unknown")
model = result.get("model", operation_name) # Use op name as fallback model
processing_time = result.get("processing_time", 0.0)
tracker.add_call_data(
cost, input_tokens, output_tokens, provider, model, processing_time
)
result["_display_options"] = display_options # Pass options for display func
logger.debug(f"Tool '{operation_name}' completed successfully.")
return True, result
except ToolInputError as e:
logger.warning(f"Input error for {operation_name}: {e}")
return False, {
"success": False,
"error": str(e),
"error_code": e.error_code,
"_display_options": display_options,
}
except ToolError as e:
logger.error(f"Tool error during {operation_name}: {e}")
return False, {
"success": False,
"error": str(e),
"error_code": e.error_code,
"_display_options": display_options,
}
except Exception as e:
logger.error(f"Unexpected error during {operation_name}: {e}", exc_info=True)
tb_str = traceback.format_exc(limit=1)
return False, {
"success": False,
"error": f"{type(e).__name__}: {e}\n{tb_str}",
"error_type": type(e).__name__,
"error_code": "UNEXPECTED_ERROR",
"_display_options": display_options,
}
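# Illustrative calling pattern (hypothetical file name and arguments; the demo
# sections below follow this exact shape):
#
#     ok, res = await safe_tool_call(
#         "sample.pdf -> MD", convert_document, tracker=tracker,
#         document_path="sample.pdf", output_format="markdown",
#     )
#     if ok:
#         display_result("sample.pdf -> MD", res)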
# --- Demo Sections (Updated to call standalone functions) ---
async def demo_section_1_conversion_ocr(
sample_files: Dict[str, Path], tracker: CostTracker
) -> None:
"""Demonstrate convert_document with various strategies and OCR."""
console.print(Rule("[bold green]Demo 1: Document Conversion & OCR[/]", style="green"))
logger.info("Starting Demo Section 1: Conversion & OCR")
pdf_digital = sample_files.get("pdf_digital")
buffett_pdf = sample_files.get("buffett_pdf")
backprop_pdf = sample_files.get("backprop_pdf")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
pdf_files_to_process = [pdf for pdf in [pdf_digital, buffett_pdf, backprop_pdf] if pdf]
if not pdf_files_to_process:
console.print("[yellow]Skipping Demo 1: Need at least one sample PDF.[/]")
return
def get_output_path(
input_file: Path, format_name: str, strategy: str, output_format: str
) -> str:
base_name = input_file.stem
return str(conversion_outputs_dir / f"{base_name}_{strategy}_{format_name}.{output_format}")
for pdf_file in pdf_files_to_process:
console.print(
Panel(
Text.from_markup(f"Processing PDF: [cyan]{pdf_file.name}[/]"), border_style="blue"
)
)
# 1a: Direct Text Strategy (Raw Text)
output_path = get_output_path(pdf_file, "direct", "raw_text", "txt")
success, result = await safe_tool_call(
f"{pdf_file.name} -> Text (Direct Text)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="text",
extraction_strategy="direct_text",
enhance_with_llm=False,
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{pdf_file.name} -> Text (Direct Text)",
result,
{"format_key": {"content": "text"}},
)
# 1b: Direct Text Strategy (Markdown Output + Enhance)
output_path = get_output_path(pdf_file, "direct", "enhanced_md", "md")
success, result = await safe_tool_call(
f"{pdf_file.name} -> MD (Direct Text + Enhance)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="markdown",
extraction_strategy="direct_text",
enhance_with_llm=True,
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{pdf_file.name} -> MD (Direct + Enhance)",
result,
{"format_key": {"content": "markdown"}},
)
# 1c: Docling Strategy (Markdown Output) - Check availability
if _DOCLING_AVAILABLE:
output_path = get_output_path(pdf_file, "docling", "md", "md")
success, result = await safe_tool_call(
f"{pdf_file.name} -> MD (Docling)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="markdown",
extraction_strategy="docling",
accelerator_device=ACCELERATOR_DEVICE,
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{pdf_file.name} -> MD (Docling)",
result,
{"format_key": {"content": "markdown"}},
)
else:
console.print("[yellow]Docling unavailable, skipping Docling conversions.[/]")
# --- OCR on PDF ---
console.print(
Panel(
f"Processing PDF with OCR Strategy: [cyan]{pdf_file.name}[/]", border_style="blue"
)
)
# 1d: OCR Strategy (Raw Text)
output_path = get_output_path(pdf_file, "ocr", "raw_text", "txt")
success, result = await safe_tool_call(
f"{pdf_file.name} -> Text (OCR Raw)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="text",
extraction_strategy="ocr",
enhance_with_llm=False,
ocr_options={"language": "eng", "dpi": 150},
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{pdf_file.name} -> Text (OCR Raw)",
result,
{"format_key": {"content": "text"}, "detail_level": 0},
)
# 1e: OCR Strategy (Markdown, Enhanced, Quality Assess)
output_path = get_output_path(pdf_file, "ocr", "enhanced_md", "md")
success, result = await safe_tool_call(
f"{pdf_file.name} -> MD (OCR + Enhance + Quality)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="markdown",
extraction_strategy="ocr",
enhance_with_llm=True,
ocr_options={
"language": "eng",
"assess_quality": True,
"remove_headers": True,
"dpi": 200,
}, # Try header removal
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{pdf_file.name} -> MD (OCR + Enhance + Quality)",
result,
{"format_key": {"content": "markdown"}},
)
# 1f: Hybrid Strategy
output_path = get_output_path(pdf_file, "hybrid", "text", "txt")
success, result = await safe_tool_call(
f"{pdf_file.name} -> Text (Hybrid + Enhance)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="text",
extraction_strategy="hybrid_direct_ocr",
enhance_with_llm=True,
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{pdf_file.name} -> Text (Hybrid + Enhance)",
result,
{"format_key": {"content": "text"}},
)
# --- Image Conversion (Using convert_document) ---
image_file = sample_files.get("image")
if image_file:
console.print(
Panel(
f"Processing Image via convert_document: [cyan]{image_file.name}[/]",
border_style="blue",
)
)
output_path = get_output_path(image_file, "convert_doc", "md", "md")
success, result = await safe_tool_call(
f"{image_file.name} -> MD (Convert Doc)",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(image_file),
output_format="markdown", # Strategy inferred
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
f"{image_file.name} -> MD (via convert_document)",
result,
{"format_key": {"content": "markdown"}},
)
# --- Conversion from Bytes ---
if pdf_digital:
console.print(Panel("Processing PDF from Bytes Data using OCR", border_style="blue"))
try:
pdf_bytes = pdf_digital.read_bytes()
output_path = get_output_path(pdf_digital, "bytes", "ocr_text", "txt")
success, result = await safe_tool_call(
"PDF Bytes -> Text (OCR)",
convert_document, # Call standalone function
tracker=tracker,
document_data=pdf_bytes,
output_format="text",
extraction_strategy="ocr",
enhance_with_llm=False,
ocr_options={"dpi": 150},
save_to_file=True,
output_path=output_path,
)
if success:
display_result(
"PDF Bytes -> Text (OCR Raw)",
result,
{"format_key": {"content": "text"}, "detail_level": 0},
)
except Exception as e:
console.print(f"[red]Error processing PDF bytes: {e}[/]")
async def demo_section_2_dedicated_ocr(sample_files: Dict[str, Path], tracker: CostTracker) -> None:
"""Demonstrate the dedicated ocr_image tool."""
console.print(Rule("[bold green]Demo 2: Dedicated Image OCR Tool[/]", style="green"))
logger.info("Starting Demo Section 2: Dedicated Image OCR Tool")
image_file = sample_files.get("image")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
if not image_file:
console.print("[yellow]Skipping Demo 2: Sample image not available.[/]")
return
def get_output_path(base_name: str, method: str, output_format: str) -> str:
return str(conversion_outputs_dir / f"{base_name}_ocr_{method}.{output_format}")
console.print(
Panel(
f"Processing Image with ocr_image Tool: [cyan]{image_file.name}[/]", border_style="blue"
)
)
# 2a: OCR Image from Path (Default: Enhance=True, Output=Markdown)
output_path = get_output_path(image_file.stem, "default", "md")
success, result = await safe_tool_call(
"OCR Image (Path, Defaults)",
ocr_image, # Call standalone function
tracker=tracker,
image_path=str(image_file),
)
if success:
try:
Path(output_path).write_text(result.get("content", ""), encoding="utf-8")
console.print(f"[green]✓ Saved OCR output to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving OCR output: {e}[/]")
display_result(
"OCR Image (Path, Defaults)", result, {"format_key": {"content": "markdown"}}
)
# 2b: OCR Image from Path (Raw Text, Specific Preprocessing)
output_path = get_output_path(image_file.stem, "raw_preprocessing", "txt")
success, result = await safe_tool_call(
"OCR Image (Path, Raw Text, Preprocessing)",
ocr_image, # Call standalone function
tracker=tracker,
image_path=str(image_file),
output_format="text",
enhance_with_llm=False,
ocr_options={
"language": "eng",
"preprocessing": {"threshold": "adaptive", "denoise": True, "deskew": False},
},
)
if success:
try:
Path(output_path).write_text(result.get("content", ""), encoding="utf-8")
console.print(f"[green]✓ Saved OCR output to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving OCR output: {e}[/]")
display_result(
"OCR Image (Raw Text, Preprocessing)", result, {"format_key": {"content": "text"}}
)
# 2c: OCR Image from Base64 Data (Enhance=True, Quality Assess)
try:
console.print(Panel("Processing Image from Base64 Data", border_style="blue"))
img_bytes = image_file.read_bytes()
img_base64 = base64.b64encode(img_bytes).decode("utf-8")
output_path = get_output_path(image_file.stem, "base64_enhanced", "md")
success, result = await safe_tool_call(
"OCR Image (Base64, Enhance, Quality)",
ocr_image, # Call standalone function
tracker=tracker,
image_data=img_base64,
output_format="markdown",
enhance_with_llm=True,
ocr_options={"assess_quality": True},
)
if success:
try:
Path(output_path).write_text(result.get("content", ""), encoding="utf-8")
console.print(f"[green]✓ Saved OCR output to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving OCR output: {e}[/]")
display_result(
"OCR Image (Base64, Enhance, Quality)",
result,
{"format_key": {"content": "markdown"}},
)
except Exception as e:
console.print(f"[red]Failed to process image from Base64: {type(e).__name__} - {e}[/]")
async def demo_section_3_enhance_text(sample_files: Dict[str, Path], tracker: CostTracker) -> None:
"""Demonstrate enhancing existing noisy text."""
console.print(Rule("[bold green]Demo 3: Enhance Existing OCR Text[/]", style="green"))
logger.info("Starting Demo Section 3: Enhance OCR Text")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
noisy_text = """
INVOlCE # 12345 - ACME C0rp.
Date: Octobor 25, 2O23
Billed To: Example Inc. , 123 Main St . Anytown USA
Itemm Descriptiom Quantlty Unlt Price Tota1
-----------------------------------------------------------------
Wldget Modell A lO $ I5.0O $l5O.OO
Gadgett Type B 5 $ 25.5O $l27.5O
Assembly Srvlce 2 hrs $ 75.OO $l5O.OO
-----------------------------------------------------------------
Subtota1 : $427.5O
Tax (8%) : $ 34.2O
TOTAL : $461.7O
Notes: Payment due ln 3O days. Thank you for yuor buslness!
Page I / l - Confidential Document"""
console.print(Panel("Original Noisy Text:", border_style="yellow"))
console.print(
Syntax(truncate_text_by_lines(noisy_text), "text", theme="default", line_numbers=True)
)
def get_output_path(base_name: str, format_name: str) -> str:
return str(conversion_outputs_dir / f"{base_name}.{format_name}")
# 3a: Enhance to Markdown (Remove Headers, Assess Quality)
output_path = get_output_path("enhanced_noisy_text_markdown", "md")
success, result = await safe_tool_call(
"Enhance -> MD (Rm Headers, Quality)",
enhance_ocr_text, # Call standalone function
tracker=tracker,
text=noisy_text,
output_format="markdown",
enhancement_options={"remove_headers": True, "assess_quality": True},
)
if success:
try:
Path(output_path).write_text(result.get("content", ""), encoding="utf-8")
console.print(f"[green]✓ Saved enhanced markdown to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving enhanced markdown: {e}[/]")
display_result(
"Enhance -> MD (Rm Headers, Quality)", result, {"format_key": {"content": "markdown"}}
)
# 3b: Enhance to Plain Text (Keep Headers)
output_path = get_output_path("enhanced_noisy_text_plain", "txt")
success, result = await safe_tool_call(
"Enhance -> Text (Keep Headers)",
enhance_ocr_text, # Call standalone function
tracker=tracker,
text=noisy_text,
output_format="text",
enhancement_options={"remove_headers": False},
)
if success:
try:
Path(output_path).write_text(result.get("content", ""), encoding="utf-8")
console.print(f"[green]✓ Saved enhanced text to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving enhanced text: {e}[/]")
display_result(
"Enhance -> Text (Keep Headers)", result, {"format_key": {"content": "text"}}
)
async def demo_section_4_html_markdown(sample_files: Dict[str, Path], tracker: CostTracker) -> None:
"""Demonstrate HTML processing and Markdown utilities."""
console.print(Rule("[bold green]Demo 4: HTML & Markdown Processing[/]", style="green"))
logger.info("Starting Demo Section 4: HTML & Markdown Processing")
html_file = sample_files.get("html")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
if not html_file:
console.print("[yellow]Skipping Demo 4: Sample HTML not available.[/]")
return
def get_output_path(base_name: str, method: str, format_name: str) -> str:
return str(conversion_outputs_dir / f"{base_name}_{method}.{format_name}")
console.print(Panel(f"Processing HTML File: [cyan]{html_file.name}[/]", border_style="blue"))
try:
html_content = html_file.read_text(encoding="utf-8", errors="replace")
except Exception as e:
console.print(f"[red]Error reading HTML file {html_file}: {e}[/]")
return
# --- clean_and_format_text_as_markdown ---
console.print(Rule("HTML to Markdown Conversion", style="dim"))
# 4a: Auto Extraction (Default)
output_path = get_output_path(html_file.stem, "auto_extract", "md")
success, result_auto = await safe_tool_call(
"HTML -> MD (Auto Extract)",
clean_and_format_text_as_markdown, # Call standalone function
tracker=tracker,
text=html_content,
extraction_method="auto",
preserve_tables=True,
)
if success:
try:
Path(output_path).write_text(result_auto.get("markdown_text", ""), encoding="utf-8")
except Exception as e:
console.print(f"[red]Error saving markdown: {e}[/]")
else:
console.print(
f"[green]✓ Saved auto-extracted markdown to: [blue underline]{output_path}[/]"
)
display_result(
"HTML -> MD (Auto Extract)", result_auto, {"format_key": {"markdown_text": "markdown"}}
)
# 4b: Readability Extraction (No Tables)
output_path = get_output_path(html_file.stem, "readability_no_tables", "md")
success, result_read = await safe_tool_call(
"HTML -> MD (Readability, No Tables)",
clean_and_format_text_as_markdown, # Call standalone function
tracker=tracker,
text=html_content,
extraction_method="readability",
preserve_tables=False,
)
if success:
try:
Path(output_path).write_text(result_read.get("markdown_text", ""), encoding="utf-8")
except Exception as e:
console.print(f"[red]Error saving markdown: {e}[/]")
else:
console.print(
f"[green]✓ Saved readability markdown to: [blue underline]{output_path}[/]"
)
display_result(
"HTML -> MD (Readability, No Tables)",
result_read,
{"format_key": {"markdown_text": "markdown"}},
)
# --- optimize_markdown_formatting ---
console.print(Rule("Markdown Optimization", style="dim"))
    # Bug guard: 'success' at this point reflects the readability call above, not the
    # auto extraction, so check result_auto directly and fall back to a small default
    # sample so the optimization calls can still be demonstrated.
    markdown_to_optimize = (
        result_auto.get("markdown_text") or "## Default MD\n* Item 1\n* Item 2\n"
    )
if markdown_to_optimize:
console.print(Panel("Original Markdown for Optimization:", border_style="yellow"))
console.print(
Syntax(truncate_text_by_lines(markdown_to_optimize), "markdown", theme="default")
)
# 4c: Optimize with fixes and wrapping
output_path = get_output_path(html_file.stem, "optimized_normalized", "md")
success, result_opt1 = await safe_tool_call(
"Optimize MD (Normalize, Fix, Wrap)",
optimize_markdown_formatting, # Call standalone function
tracker=tracker,
markdown=markdown_to_optimize,
normalize_headings=True,
fix_lists=True,
fix_links=True,
add_line_breaks=True,
max_line_length=80,
)
if success:
try:
Path(output_path).write_text(
result_opt1.get("optimized_markdown", ""), encoding="utf-8"
)
except Exception as e:
console.print(f"[red]Error saving markdown: {e}[/]")
else:
console.print(
f"[green]✓ Saved optimized markdown to: [blue underline]{output_path}[/]"
)
display_result(
"Optimize MD (Normalize, Fix, Wrap)",
result_opt1,
{"format_key": {"optimized_markdown": "markdown"}},
)
# 4d: Optimize in Compact Mode
output_path = get_output_path(html_file.stem, "optimized_compact", "md")
success, result_opt2 = await safe_tool_call(
"Optimize MD (Compact Mode)",
optimize_markdown_formatting, # Call standalone function
tracker=tracker,
markdown=markdown_to_optimize,
compact_mode=True,
)
if success:
try:
Path(output_path).write_text(
result_opt2.get("optimized_markdown", ""), encoding="utf-8"
)
except Exception as e:
console.print(f"[red]Error saving markdown: {e}[/]")
else:
console.print(
f"[green]✓ Saved compact markdown to: [blue underline]{output_path}[/]"
)
display_result(
"Optimize MD (Compact Mode)",
result_opt2,
{"format_key": {"optimized_markdown": "markdown"}},
)
    else:
        console.print("[yellow]Skipping optimization: no markdown content available.[/]")
# --- detect_content_type ---
console.print(Rule("Content Type Detection", style="dim"))
success, result_detect = await safe_tool_call(
"Detect Type (HTML)", detect_content_type, text=html_content[:6000], tracker=tracker
)
if success:
display_result("Detect Type (HTML)", result_detect)
md_for_detect = (
result_auto.get("markdown_text", "## Sample\nText") if result_auto else "## Sample\nText"
)
success, result_detect = await safe_tool_call(
"Detect Type (Markdown)", detect_content_type, text=md_for_detect[:6000], tracker=tracker
)
if success:
display_result("Detect Type (Markdown)", result_detect)
async def demo_section_5_analyze_structure(
sample_files: Dict[str, Path], tracker: CostTracker
) -> None:
"""Demonstrate the dedicated PDF structure analysis tool."""
console.print(Rule("[bold green]Demo 5: Analyze PDF Structure Tool[/]", style="green"))
logger.info("Starting Demo Section 5: Analyze PDF Structure")
pdf_digital = sample_files.get("pdf_digital")
buffett_pdf = sample_files.get("buffett_pdf")
backprop_pdf = sample_files.get("backprop_pdf")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
pdf_files_to_process = [pdf for pdf in [pdf_digital, buffett_pdf, backprop_pdf] if pdf]
if not pdf_files_to_process:
console.print("[yellow]Skipping Demo 5: No PDF files available.[/]")
return
def get_output_path(file_name: str, analysis_type: str) -> str:
return str(conversion_outputs_dir / f"{file_name}_analysis_{analysis_type}.json")
for pdf_file in pdf_files_to_process:
console.print(
Panel(f"Analyzing PDF Structure: [cyan]{pdf_file.name}[/]", border_style="blue")
)
# 5a: Analyze Structure (Default options)
output_path = get_output_path(pdf_file.stem, "default")
success, result = await safe_tool_call(
f"Analyze {pdf_file.name} Structure (Defaults)",
analyze_pdf_structure, # Call standalone function
tracker=tracker,
file_path=str(pdf_file),
)
if success:
try:
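                # Drop internal bookkeeping keys (prefixed with "_") before
                # serializing; the same pattern is reused for every JSON dump below.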
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(f"[green]✓ Saved PDF analysis to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving PDF analysis: {e}[/]")
display_result(f"Analyze {pdf_file.name} Structure (Defaults)", result)
# 5b: Analyze Structure (All options enabled)
output_path = get_output_path(pdf_file.stem, "all_options")
success, result_all = await safe_tool_call(
f"Analyze {pdf_file.name} Structure (All Options)",
analyze_pdf_structure, # Call standalone function
tracker=tracker,
file_path=str(pdf_file),
extract_metadata=True,
extract_outline=True,
extract_fonts=True,
extract_images=True,
estimate_ocr_needs=True,
)
if success:
try:
result_to_save = {k: v for k, v in result_all.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(
f"[green]✓ Saved detailed PDF analysis to: [blue underline]{output_path}[/]"
)
except Exception as e:
console.print(f"[red]Error saving PDF analysis: {e}[/]")
display_result(f"Analyze {pdf_file.name} Structure (All Options)", result_all)
async def demo_section_6_chunking_tables(
sample_files: Dict[str, Path], tracker: CostTracker
) -> None:
"""Demonstrate Document Chunking and Table Extraction tools."""
console.print(Rule("[bold green]Demo 6: Chunking & Table Extraction[/]", style="green"))
logger.info("Starting Demo Section 6: Chunking & Table Extraction")
pdf_digital = sample_files.get("pdf_digital")
buffett_pdf = sample_files.get("buffett_pdf")
backprop_pdf = sample_files.get("backprop_pdf")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
pdf_files_to_process = [pdf for pdf in [pdf_digital, buffett_pdf, backprop_pdf] if pdf]
if not pdf_files_to_process:
console.print("[yellow]Skipping Demo 6: No PDF files available.[/]")
return
def get_output_path(base_name: str, process_type: str, format_name: str) -> str:
return str(conversion_outputs_dir / f"{base_name}_{process_type}.{format_name}")
for pdf_file in pdf_files_to_process:
try:
console.print(
Panel(
f"Preparing Content for Chunking/Tables from: [cyan]{pdf_file.name}[/]",
border_style="dim",
)
)
success, conv_result = await safe_tool_call(
f"Get MD for {pdf_file.name}",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="markdown",
extraction_strategy="direct_text",
enhance_with_llm=False, # Use raw for speed
)
if not success or not conv_result.get("content"):
console.print(
f"[red]Failed to get content for {pdf_file.name}. Skipping chunk/table demo for this file.[/]"
)
continue
markdown_content = conv_result["content"]
console.print("[green]✓ Content prepared.[/]")
# --- Chunking Demonstrations ---
console.print(Rule(f"Document Chunking for {pdf_file.name}", style="dim"))
chunking_configs = [
{"method": "paragraph", "size": 500, "overlap": 50},
{"method": "character", "size": 800, "overlap": 100},
{"method": "token", "size": 200, "overlap": 20},
{"method": "section", "size": 1000, "overlap": 0},
]
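            # Optional sanity check (illustrative sketch): when tiktoken is available,
            # report an approximate token count so the token-based chunk_size above can
            # be judged. Assumes the "cl100k_base" encoding; the chunker's own tokenizer
            # may differ.
            if _TIKTOKEN_AVAILABLE:
                import tiktoken  # local import; only needed for this estimate

                _token_count = len(tiktoken.get_encoding("cl100k_base").encode(markdown_content))
                console.print(f"[dim]Approx. token count for {pdf_file.name}: {_token_count}[/]")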
for config in chunking_configs:
method, size, overlap = config["method"], config["size"], config["overlap"]
if method == "token" and not _TIKTOKEN_AVAILABLE:
console.print(
f"[yellow]Skipping chunking method '{method}': Tiktoken not available.[/]"
)
continue
output_path = get_output_path(pdf_file.stem, f"chunks_{method}", "json")
success, result = await safe_tool_call(
f"Chunking {pdf_file.name} ({method.capitalize()})",
chunk_document, # Call standalone function
tracker=tracker,
document=markdown_content,
chunk_method=method,
chunk_size=size,
chunk_overlap=overlap,
)
if success:
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(
json.dumps(result_to_save, indent=2), encoding="utf-8"
)
console.print(f"[green]✓ Saved chunks to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving chunks: {e}[/]")
display_result(f"Chunking {pdf_file.name} ({method}, size={size})", result)
# --- Table Extraction (Requires Docling) ---
console.print(
Rule(f"Table Extraction for {pdf_file.name} (Requires Docling)", style="dim")
)
if _DOCLING_AVAILABLE:
tables_dir = conversion_outputs_dir / f"{pdf_file.stem}_tables"
tables_dir.mkdir(exist_ok=True)
# 6a: Extract as CSV
success, result_csv = await safe_tool_call(
f"Extract {pdf_file.name} Tables (CSV)",
extract_tables, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
table_mode="csv",
output_dir=str(tables_dir / "csv"),
)
if success and result_csv.get("tables"):
display_result(
f"Extract {pdf_file.name} Tables (CSV)",
result_csv,
{"display_keys": ["tables", "saved_files"], "detail_level": 0},
)
if result_csv["tables"]:
console.print(
Panel(
escape(result_csv["tables"][0][:500]) + "...",
title="First Table Preview (CSV)",
)
)
elif success:
console.print(f"[yellow]No tables found by Docling in {pdf_file.name}.[/]")
# 6b: Extract as JSON
success, result_json = await safe_tool_call(
f"Extract {pdf_file.name} Tables (JSON)",
extract_tables, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
table_mode="json",
output_dir=str(tables_dir / "json"),
)
if success and result_json.get("tables"):
display_result(
f"Extract {pdf_file.name} Tables (JSON)",
result_json,
{"display_keys": ["tables"], "detail_level": 1},
)
# 6c: Extract as Pandas DataFrame (if available)
if _PANDAS_AVAILABLE:
success, result_pd = await safe_tool_call(
f"Extract {pdf_file.name} Tables (Pandas)",
extract_tables, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
table_mode="pandas",
output_dir=str(tables_dir / "pandas_csv"), # Save as csv
)
if success and result_pd.get("tables"):
display_result(
f"Extract {pdf_file.name} Tables (Pandas)",
result_pd,
{"display_keys": ["tables"], "detail_level": 0},
)
if result_pd["tables"]:
first_df = result_pd["tables"][0]
if hasattr(first_df, "shape") and hasattr(
first_df, "columns"
): # Check if it looks like a DataFrame
console.print(
Panel(
f"First DataFrame Info:\nShape: {first_df.shape}\nColumns: {list(first_df.columns)}",
title="First DataFrame Preview",
)
)
else:
console.print(
f"[yellow]Pandas result format unexpected: {type(first_df)}[/]"
)
else:
console.print(
"[yellow]Pandas unavailable, skipping Pandas table extraction.[/]"
)
else:
console.print("[yellow]Docling unavailable, skipping table extraction demo.[/]")
except Exception as e:
logger.error(f"Error processing {pdf_file.name} in Sec 6: {e}", exc_info=True)
console.print(f"[bold red]Error processing {pdf_file.name}:[/] {e}")
async def demo_section_7_analysis(sample_files: Dict[str, Path], tracker: CostTracker) -> None:
"""Demonstrate the document analysis tools."""
console.print(Rule("[bold green]Demo 7: Document Analysis Suite[/]", style="green"))
logger.info("Starting Demo Section 7: Document Analysis Suite")
pdf_digital = sample_files.get("pdf_digital")
buffett_pdf = sample_files.get("buffett_pdf")
backprop_pdf = sample_files.get("backprop_pdf")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir")
pdf_files_to_process = [pdf for pdf in [pdf_digital, buffett_pdf, backprop_pdf] if pdf]
if not pdf_files_to_process:
console.print("[yellow]Skipping Demo 7: No PDF files available.[/]")
return
def get_output_path(base_name: str, analysis_type: str, format_name: str = "json") -> str:
return str(conversion_outputs_dir / f"{base_name}_analysis_{analysis_type}.{format_name}")
for pdf_file in pdf_files_to_process:
console.print(
Panel(f"Preparing Text for Analysis from: [cyan]{pdf_file.name}[/]", border_style="dim")
)
success, conv_result = await safe_tool_call(
f"Get Text for {pdf_file.name}",
convert_document, # Call standalone function
tracker=tracker,
document_path=str(pdf_file),
output_format="markdown",
extraction_strategy="direct_text",
enhance_with_llm=False,
)
if not success or not conv_result.get("content"):
console.print(f"[red]Failed to get text for analysis of {pdf_file.name}.[/]")
continue
analysis_text = conv_result["content"]
console.print("[green]✓ Content prepared.[/]")
console.print(
Panel(
escape(truncate_text_by_lines(analysis_text[:600])),
title=f"Text Preview for {pdf_file.name}",
border_style="dim",
)
)
entities_result_for_canon = None
# 7.1 Identify Sections
output_path = get_output_path(pdf_file.stem, "sections")
success, result = await safe_tool_call(
f"Identify Sections in {pdf_file.name}",
identify_sections,
document=analysis_text,
tracker=tracker,
)
if success:
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(
f"[green]✓ Saved sections analysis to: [blue underline]{output_path}[/]"
)
except Exception as e:
console.print(f"[red]Error saving analysis: {e}[/]")
display_result(f"Identify Sections ({pdf_file.name})", result)
# 7.2 Extract Entities
output_path = get_output_path(pdf_file.stem, "entities")
success, result = await safe_tool_call(
f"Extract Entities from {pdf_file.name}",
extract_entities,
document=analysis_text,
tracker=tracker,
)
if success:
entities_result_for_canon = result # Save for next step
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(
f"[green]✓ Saved entities analysis to: [blue underline]{output_path}[/]"
)
except Exception as e:
console.print(f"[red]Error saving analysis: {e}[/]")
display_result(f"Extract Entities ({pdf_file.name})", result)
# 7.3 Canonicalise Entities
if entities_result_for_canon and entities_result_for_canon.get("entities"):
output_path = get_output_path(pdf_file.stem, "canon_entities")
success, result = await safe_tool_call(
f"Canonicalise Entities for {pdf_file.name}",
canonicalise_entities,
entities_input=entities_result_for_canon,
tracker=tracker,
)
if success:
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(
json.dumps(result_to_save, indent=2), encoding="utf-8"
)
console.print(
f"[green]✓ Saved canonicalized entities to: [blue underline]{output_path}[/]"
)
except Exception as e:
console.print(f"[red]Error saving analysis: {e}[/]")
display_result(f"Canonicalise Entities ({pdf_file.name})", result)
else:
console.print(
f"[yellow]Skipping canonicalization for {pdf_file.name} (no entities).[/]"
)
# 7.4 Generate QA Pairs
output_path = get_output_path(pdf_file.stem, "qa_pairs")
success, result = await safe_tool_call(
f"Generate QA Pairs for {pdf_file.name}",
generate_qa_pairs,
document=analysis_text,
num_questions=4,
tracker=tracker,
)
if success:
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(f"[green]✓ Saved QA pairs to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving QA pairs: {e}[/]")
display_result(f"Generate QA Pairs ({pdf_file.name})", result)
# 7.5 Summarize Document
output_path = get_output_path(pdf_file.stem, "summary", "md")
success, result = await safe_tool_call(
f"Summarize {pdf_file.name}",
summarize_document,
document=analysis_text,
max_length=100,
tracker=tracker,
)
if success:
try:
Path(output_path).write_text(result.get("summary", ""), encoding="utf-8")
except Exception as e:
console.print(f"[red]Error saving summary: {e}[/]")
else:
console.print(f"[green]✓ Saved summary to: [blue underline]{output_path}[/]")
display_result(
f"Summarize {pdf_file.name}", result, {"format_key": {"summary": "text"}}
)
# 7.6 Extract Metrics (Domain specific)
output_path = get_output_path(pdf_file.stem, "metrics")
success, result = await safe_tool_call(
f"Extract Metrics from {pdf_file.name}",
extract_metrics,
document=analysis_text,
tracker=tracker,
)
if success:
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(f"[green]✓ Saved metrics to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving metrics: {e}[/]")
display_result(f"Extract Metrics ({pdf_file.name})", result)
if not result.get("metrics"):
console.print(f"[yellow]Note: No pre-defined metrics found in {pdf_file.name}.[/]")
# 7.7 Flag Risks (Domain specific)
output_path = get_output_path(pdf_file.stem, "risks")
success, result = await safe_tool_call(
f"Flag Risks in {pdf_file.name}", flag_risks, document=analysis_text, tracker=tracker
)
if success:
try:
result_to_save = {k: v for k, v in result.items() if not k.startswith("_")}
Path(output_path).write_text(json.dumps(result_to_save, indent=2), encoding="utf-8")
console.print(f"[green]✓ Saved risks analysis to: [blue underline]{output_path}[/]")
except Exception as e:
console.print(f"[red]Error saving risks analysis: {e}[/]")
display_result(f"Flag Risks ({pdf_file.name})", result)
if not result.get("risks"):
console.print(f"[yellow]Note: No pre-defined risks found in {pdf_file.name}.[/]")
async def demo_section_8_batch_processing(
sample_files: Dict[str, Path], tracker: CostTracker
) -> None:
"""Demonstrate the standalone batch processing pipeline."""
console.print(Rule("[bold green]Demo 8: Advanced Batch Processing[/]", style="green"))
logger.info("Starting Demo Section 8: Batch Processing")
pdf_digital = sample_files.get("pdf_digital")
buffett_pdf = sample_files.get("buffett_pdf")
image_file = sample_files.get("image")
conversion_outputs_dir = sample_files.get("conversion_outputs_dir") # noqa: F841
# --- Prepare Batch Inputs ---
batch_inputs = []
if pdf_digital:
batch_inputs.append({"document_path": str(pdf_digital), "item_id": "pdf1"})
if buffett_pdf:
batch_inputs.append({"document_path": str(buffett_pdf), "item_id": "pdf2"})
if image_file:
batch_inputs.append({"image_path": str(image_file), "item_id": "img1"}) # Use image_path
if not batch_inputs:
console.print("[yellow]Skipping batch demo: No suitable input files found.[/]")
return
console.print(f"Prepared {len(batch_inputs)} items for batch processing.")
# --- Define Batch Operations Pipeline ---
    # NOTE: The values in input_keys_map MUST be keys present in the top-level
    # item_state; the batch worker does not support dot notation for reaching into
    # nested results. Workaround used here: do not promote Step 1's output. Instead,
    # each subsequent step maps its input argument (e.g., 'document') to Step 1's
    # output_key ("conversion_result") and relies on the worker unwrapping the
    # nested 'content' field from that dict (see the worker notes below).
batch_operations = [
# Step 1: Convert PDF/OCR Image to Markdown
{
"operation": "convert_document",
"output_key": "conversion_result", # Result stored here
"params": {
"output_format": "markdown",
"extraction_strategy": "hybrid_direct_ocr",
"enhance_with_llm": True,
"ocr_options": {"dpi": 200},
"accelerator_device": ACCELERATOR_DEVICE,
},
# REMOVED "promote_output": "content"
},
# Step 2: Chunk the resulting markdown content from Step 1
{
"operation": "chunk_document",
# The worker needs to know the input arg name ('document') and the state key to get it from.
"input_keys_map": {
"document": "conversion_result"
}, # Map 'document' arg to the dict from step 1
"output_key": "chunking_result",
"params": {"chunk_method": "paragraph", "chunk_size": 750, "chunk_overlap": 75},
# If we wanted chunks available later, we could promote here:
# "promote_output": "chunks"
},
# Step 3: Generate QA pairs using the *original* markdown from Step 1
{
"operation": "generate_qa_pairs",
"input_keys_map": {
"document": "conversion_result"
}, # Map 'document' arg to the dict from step 1
"output_key": "qa_result",
"params": {"num_questions": 3},
},
# Step 4: Summarize the original converted content from Step 1
{
"operation": "summarize_document",
"input_keys_map": {
"document": "conversion_result"
}, # Map 'document' arg to the dict from step 1
"output_key": "summary_result",
"params": {"max_length": 80},
},
]
# --- Adjusting the worker function to handle dictionary input via input_keys_map ---
# The batch processor's worker (_apply_op_to_item_worker) needs a slight modification
# to handle the case where input_keys_map points to a dictionary result from a previous step,
# and we need to extract a specific field (like 'content') from it.
# Let's modify the worker logic conceptually (assuming this change is made in the actual tool file):
# Inside _apply_op_to_item_worker, when processing input_keys_map:
# ```python
# # ... inside worker ...
# if isinstance(op_input_map, dict):
# for param_name, state_key in op_input_map.items():
# if state_key not in item_state:
# raise ToolInputError(...)
#
# mapped_value = item_state[state_key]
#
# # *** ADDED LOGIC ***
# # If mapped value is a dict from a previous step, and the param_name suggests content ('document', 'text', etc.)
# # try to extract the 'content' field from that dictionary.
# if isinstance(mapped_value, dict) and param_name in ["document", "text", "content"]:
# content_value = mapped_value.get("content")
# if content_value is not None:
# mapped_value = content_value
# else:
# # Maybe try other common keys or raise error if 'content' expected but missing
# logger.warning(f"Mapped input '{state_key}' is dict, but key 'content' not found for param '{param_name}'")
# # Fallback to using the whole dict? Or fail? Let's use whole dict as fallback for now.
# # *** END ADDED LOGIC ***
#
# # Assign the potentially extracted value
# if param_name != primary_input_arg_name:
# call_kwargs[param_name] = mapped_value
# elif call_kwargs.get(primary_input_arg_name) != mapped_value: # Use .get() for safety
# logger.warning(...)
# call_kwargs[primary_input_arg_name] = mapped_value
# # ... rest of worker ...
# ```
    # Assuming this modification is made in process_document_batch's internal worker,
    # the pipeline definition above should now work correctly.
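    # Illustrative sketch only (hypothetical helper, not called in this demo): the
    # extraction logic described above, expressed as a standalone function. The real
    # logic would live inside process_document_batch's worker and may differ.
    def _resolve_mapped_input(item_state: dict, state_key: str, param_name: str):
        """Resolve an input_keys_map entry, unwrapping a prior step's 'content'
        field when the target parameter expects raw text."""
        mapped_value = item_state[state_key]
        if isinstance(mapped_value, dict) and param_name in ("document", "text", "content"):
            content_value = mapped_value.get("content")
            if content_value is not None:
                return content_value
            logger.warning(
                f"Mapped input '{state_key}' is a dict but has no 'content' key for param '{param_name}'"
            )
        return mapped_value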
console.print(
Panel("Defined Batch Pipeline (Corrected Input Mapping):", border_style="magenta")
)
console.print(Syntax(json.dumps(batch_operations, indent=2), "json", theme="default"))
# --- Execute Batch Processing ---
console.print(f"\nExecuting batch pipeline with concurrency {MAX_CONCURRENT_TASKS}...")
try:
# Call the standalone batch processing function
batch_results = await process_document_batch(
inputs=batch_inputs, operations=batch_operations, max_concurrency=MAX_CONCURRENT_TASKS
)
console.print(Rule("[bold]Batch Processing Results[/]", style="blue"))
# --- Display Batch Results ---
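        # Each element of batch_results is expected to look roughly like the sketch
        # below (inferred from the display logic that follows, not a formal schema):
        # {
        #     "item_id": "pdf1",
        #     "_status": "processed" | "failed" | ...,
        #     "conversion_result": {...},   # one entry per output_key above
        #     "chunking_result": {...},
        #     "qa_result": {...},
        #     "summary_result": {...},
        #     "_error_log": [...],          # present when step errors were recorded
        # }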
if not batch_results:
console.print("[yellow]Batch processing returned no results.[/]")
else:
console.print(f"Processed {len(batch_results)} items.")
for i, item_result in enumerate(batch_results):
item_id = item_result.get("item_id", f"Item {i}")
status = item_result.get("_status", "unknown")
color = (
"green" if status == "processed" else "red" if status == "failed" else "yellow"
)
console.print(
Rule(f"Result for: [bold {color}]{item_id}[/] (Status: {status})", style=color)
)
outputs_table = Table(title="Generated Outputs", box=box.MINIMAL, show_header=False)
outputs_table.add_column("Step", style="cyan")
outputs_table.add_column("Output Key", style="magenta")
outputs_table.add_column("Preview / Status", style="white")
for op_spec in batch_operations:
key = op_spec["output_key"]
step_result = item_result.get(key)
preview = "[dim]Not generated[/]"
if step_result and isinstance(step_result, dict):
step_success = step_result.get("success", False)
preview = (
"[green]Success[/]"
if step_success
else f"[red]Failed: {step_result.get('error_code', 'ERROR')}[/]"
)
if step_success:
if "content" in step_result and isinstance(step_result["content"], str):
preview += f" (Content len: {len(step_result['content'])})"
elif "chunks" in step_result and isinstance(
step_result["chunks"], list
):
preview += f" ({len(step_result['chunks'])} chunks)"
elif "summary" in step_result and isinstance(
step_result.get("summary"), str
):
preview += f" (Summary len: {len(step_result['summary'])})"
elif "qa_pairs" in step_result and isinstance(
step_result.get("qa_pairs"), list
):
preview += f" ({len(step_result['qa_pairs'])} pairs)"
elif "metrics" in step_result and isinstance(
step_result.get("metrics"), dict
):
preview += f" ({len(step_result['metrics'])} metrics)"
elif "risks" in step_result and isinstance(
step_result.get("risks"), dict
):
preview += f" ({len(step_result['risks'])} risks)"
# Add other previews as needed
outputs_table.add_row(op_spec["operation"], key, preview)
console.print(outputs_table)
if item_result.get("_error_log"):
error_panel_content = Text()
for err in item_result["_error_log"]:
error_panel_content.append(
Text.from_markup(f"- [yellow]{escape(err)}[/]\n")
)
console.print(
Panel(error_panel_content, title="Error Log", border_style="yellow")
)
console.print("-" * 30) # Separator
except Exception as e:
logger.error(f"Batch processing demo failed: {e}", exc_info=True)
console.print(f"[bold red]Error during batch processing execution:[/]\n{e}")
async def main():
"""Main function to run the DocumentProcessingTool demo."""
console.print(Rule("[bold] Document Processing Standalone Functions Demo [/bold]", style="blue"))
if not MCP_COMPONENTS_LOADED:
# Error already printed during import attempt
sys.exit(1)
    # Report optional dependency availability and the selected accelerator device.
console.print(f"Docling Available: {_DOCLING_AVAILABLE}")
console.print(f"Pandas Available: {_PANDAS_AVAILABLE}")
console.print(f"Tiktoken Available: {_TIKTOKEN_AVAILABLE}")
console.print(f"Using Accelerator: {ACCELERATOR_DEVICE}")
try:
# Create a CostTracker instance
tracker = CostTracker()
# Create gateway - still useful for initializing providers if needed by underlying tools like generate_completion
gateway = Gateway("doc-proc-standalone-demo", register_tools=False) # Don't register the old tool
logger.info("Initializing gateway and providers (needed for potential LLM calls)...", emoji_key="provider")
try:
await gateway._initialize_providers()
logger.info("Providers initialized.")
except Exception as init_e:
logger.error(f"Failed to initialize providers: {init_e}", exc_info=True)
console.print("[red]Error initializing providers. LLM-dependent operations might fail.[/]")
# --- Prepare sample files ---
logger.info("Setting up sample files and directories...", emoji_key="setup")
DEFAULT_SAMPLE_DIR.mkdir(parents=True, exist_ok=True)
DOWNLOADED_FILES_DIR.mkdir(parents=True, exist_ok=True)
conversion_outputs_dir = DEFAULT_SAMPLE_DIR / "conversion_outputs"
conversion_outputs_dir.mkdir(exist_ok=True)
logger.info(f"Outputs will be saved in: {conversion_outputs_dir}")
sample_files: Dict[str, Any] = {"conversion_outputs_dir": conversion_outputs_dir}
# --- Download Files Concurrently (No shared progress bar) ---
# The download_file_with_progress function will create its own transient progress bar
# if no 'progress' object is passed.
console.print(Rule("Downloading Sample Files", style="blue"))
        download_tasks = [
            download_file_with_progress(DEFAULT_SAMPLE_PDF_URL, DOWNLOADED_FILES_DIR / "attention_is_all_you_need.pdf", "Transformer Paper (PDF)"),
            download_file_with_progress(DEFAULT_SAMPLE_IMAGE_URL, DOWNLOADED_FILES_DIR / "sample_ocr_image.tif", "Sample OCR Image (TIFF)"),
            download_file_with_progress(SAMPLE_HTML_URL, DOWNLOADED_FILES_DIR / "transformer_wiki.html", "Transformer Wiki (HTML)"),
            download_file_with_progress(BUFFETT_SHAREHOLDER_LETTER_URL, DOWNLOADED_FILES_DIR / "buffett_letter_2022.pdf", "Buffett Letter (PDF)"),
            download_file_with_progress(BACKPROPAGATION_PAPER_URL, DOWNLOADED_FILES_DIR / "backprop_paper.pdf", "Backprop Paper (PDF)"),
        ]
download_results = await asyncio.gather(*download_tasks)
console.print(Rule("Downloads Complete", style="blue"))
sample_files["pdf_digital"] = download_results[0]
sample_files["image"] = download_results[1]
sample_files["html"] = download_results[2]
sample_files["buffett_pdf"] = download_results[3]
sample_files["backprop_pdf"] = download_results[4]
# --- Run Demo Sections ---
# Pass the necessary sample_files dict and the tracker
await demo_section_1_conversion_ocr(sample_files, tracker)
await demo_section_2_dedicated_ocr(sample_files, tracker)
await demo_section_3_enhance_text(sample_files, tracker)
await demo_section_4_html_markdown(sample_files, tracker)
await demo_section_5_analyze_structure(sample_files, tracker)
await demo_section_6_chunking_tables(sample_files, tracker)
await demo_section_7_analysis(sample_files, tracker)
await demo_section_8_batch_processing(sample_files, tracker)
# --- Display Final Cost Summary ---
console.print(Rule("[bold]Demo Complete - Cost Summary[/]", style="blue"))
tracker.display_summary(console)
except Exception as e:
logger.critical(f"Demo execution failed critically: {str(e)}", exc_info=True)
console.print_exception(show_locals=True) # Use Rich's exception printing
return 1
logger.info("Demo finished successfully.")
return 0
if __name__ == "__main__":
# Run the demo
exit_code = asyncio.run(main())
sys.exit(exit_code)
```