This is page 5 of 6. Use http://codebase.md/cyanheads/atlas-mcp-server?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .clinerules
├── .dockerignore
├── .env.example
├── .github
│ ├── FUNDING.yml
│ └── workflows
│ └── publish.yml
├── .gitignore
├── .ncurc.json
├── .repomixignore
├── automated-tests
│ └── AGENT_TEST_05282025.md
├── CHANGELOG.md
├── CLAUDE.md
├── docker-compose.yml
├── docs
│ └── tree.md
├── examples
│ ├── backup-example
│ │ ├── knowledges.json
│ │ ├── projects.json
│ │ ├── relationships.json
│ │ └── tasks.json
│ ├── deep-research-example
│ │ ├── covington_community_grant_research.md
│ │ └── full-export.json
│ ├── README.md
│ └── webui-example.png
├── LICENSE
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── repomix.config.json
├── scripts
│ ├── clean.ts
│ ├── fetch-openapi-spec.ts
│ ├── make-executable.ts
│ └── tree.ts
├── smithery.yaml
├── src
│ ├── config
│ │ └── index.ts
│ ├── index.ts
│ ├── mcp
│ │ ├── resources
│ │ │ ├── index.ts
│ │ │ ├── knowledge
│ │ │ │ └── knowledgeResources.ts
│ │ │ ├── projects
│ │ │ │ └── projectResources.ts
│ │ │ ├── tasks
│ │ │ │ └── taskResources.ts
│ │ │ └── types.ts
│ │ ├── server.ts
│ │ ├── tools
│ │ │ ├── atlas_database_clean
│ │ │ │ ├── cleanDatabase.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_deep_research
│ │ │ │ ├── deepResearch.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_knowledge_add
│ │ │ │ ├── addKnowledge.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_knowledge_delete
│ │ │ │ ├── deleteKnowledge.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_knowledge_list
│ │ │ │ ├── index.ts
│ │ │ │ ├── listKnowledge.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_project_create
│ │ │ │ ├── createProject.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_project_delete
│ │ │ │ ├── deleteProject.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_project_list
│ │ │ │ ├── index.ts
│ │ │ │ ├── listProjects.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_project_update
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── updateProject.ts
│ │ │ ├── atlas_task_create
│ │ │ │ ├── createTask.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_task_delete
│ │ │ │ ├── deleteTask.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_task_list
│ │ │ │ ├── index.ts
│ │ │ │ ├── listTasks.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ └── types.ts
│ │ │ ├── atlas_task_update
│ │ │ │ ├── index.ts
│ │ │ │ ├── responseFormat.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── updateTask.ts
│ │ │ └── atlas_unified_search
│ │ │ ├── index.ts
│ │ │ ├── responseFormat.ts
│ │ │ ├── types.ts
│ │ │ └── unifiedSearch.ts
│ │ └── transports
│ │ ├── authentication
│ │ │ └── authMiddleware.ts
│ │ ├── httpTransport.ts
│ │ └── stdioTransport.ts
│ ├── services
│ │ └── neo4j
│ │ ├── backupRestoreService
│ │ │ ├── backupRestoreTypes.ts
│ │ │ ├── backupUtils.ts
│ │ │ ├── exportLogic.ts
│ │ │ ├── importLogic.ts
│ │ │ ├── index.ts
│ │ │ └── scripts
│ │ │ ├── db-backup.ts
│ │ │ └── db-import.ts
│ │ ├── driver.ts
│ │ ├── events.ts
│ │ ├── helpers.ts
│ │ ├── index.ts
│ │ ├── knowledgeService.ts
│ │ ├── projectService.ts
│ │ ├── searchService
│ │ │ ├── fullTextSearchLogic.ts
│ │ │ ├── index.ts
│ │ │ ├── searchTypes.ts
│ │ │ └── unifiedSearchLogic.ts
│ │ ├── taskService.ts
│ │ ├── types.ts
│ │ └── utils.ts
│ ├── types
│ │ ├── errors.ts
│ │ ├── mcp.ts
│ │ └── tool.ts
│ ├── utils
│ │ ├── index.ts
│ │ ├── internal
│ │ │ ├── errorHandler.ts
│ │ │ ├── index.ts
│ │ │ ├── logger.ts
│ │ │ └── requestContext.ts
│ │ ├── metrics
│ │ │ ├── index.ts
│ │ │ └── tokenCounter.ts
│ │ ├── parsing
│ │ │ ├── dateParser.ts
│ │ │ ├── index.ts
│ │ │ └── jsonParser.ts
│ │ └── security
│ │ ├── idGenerator.ts
│ │ ├── index.ts
│ │ ├── rateLimiter.ts
│ │ └── sanitization.ts
│ └── webui
│ ├── index.html
│ ├── logic
│ │ ├── api-service.js
│ │ ├── app-state.js
│ │ ├── config.js
│ │ ├── dom-elements.js
│ │ ├── main.js
│ │ └── ui-service.js
│ └── styling
│ ├── base.css
│ ├── components.css
│ ├── layout.css
│ └── theme.css
├── tsconfig.json
├── tsconfig.typedoc.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/src/utils/security/sanitization.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Provides a comprehensive `Sanitization` class for various input cleaning and validation tasks.
* This module includes utilities for sanitizing HTML, strings, URLs, file paths, JSON, numbers,
* and for redacting sensitive information from data intended for logging.
* @module src/utils/security/sanitization
*/
import path from "path";
import sanitizeHtml from "sanitize-html";
import validator from "validator";
import { BaseErrorCode, McpError } from "../../types/errors.js";
import { logger, requestContextService } from "../index.js";
/**
 * Defines options for path sanitization to control how file paths are processed and validated.
 */
export interface PathSanitizeOptions {
  /** If provided, restricts sanitized paths to be relative to this directory (resolved to an absolute path before use). */
  rootDir?: string;
  /** If true, normalizes Windows backslashes to POSIX forward slashes. */
  toPosix?: boolean;
  /** If true, absolute paths are permitted (subject to `rootDir` containment). Default: false. */
  allowAbsolute?: boolean;
}
/**
 * Contains information about a path sanitization operation, returned by `sanitizePath`.
 */
export interface SanitizedPathInfo {
  /** The final sanitized and normalized path string. */
  sanitizedPath: string;
  /** The original path string before any processing. */
  originalInput: string;
  /** True if the input path was absolute after initial normalization. */
  wasAbsolute: boolean;
  /** True if an absolute path was converted to relative because `allowAbsolute` was false. */
  convertedToRelative: boolean;
  /** The effective options used for sanitization, with defaults applied. */
  optionsUsed: PathSanitizeOptions;
}
/**
 * Defines options for context-specific string sanitization.
 */
export interface SanitizeStringOptions {
  /**
   * The context in which the string will be used. Passing 'javascript' causes
   * `sanitizeString` to throw an McpError; it exists in the union only so the
   * rejection can be expressed at runtime.
   */
  context?: "text" | "html" | "attribute" | "url" | "javascript";
  /** Custom allowed HTML tags if `context` is 'html'. */
  allowedTags?: string[];
  /** Custom allowed HTML attributes (per tag name) if `context` is 'html'. */
  allowedAttributes?: Record<string, string[]>;
}
/**
 * Configuration options for HTML sanitization, mirroring `sanitize-html` library options.
 */
export interface HtmlSanitizeConfig {
  /** An array of allowed HTML tag names. */
  allowedTags?: string[];
  /** Specifies allowed attributes, either globally or per tag. */
  allowedAttributes?: sanitizeHtml.IOptions["allowedAttributes"];
  /** If true, HTML comments are preserved during sanitization. */
  preserveComments?: boolean;
  /** Custom functions to transform tags during sanitization. */
  transformTags?: sanitizeHtml.IOptions["transformTags"];
}
/**
 * A singleton class providing various methods for input sanitization.
 * Aims to protect against common vulnerabilities like XSS and path traversal.
 */
export class Sanitization {
  /** @private */
  private static instance: Sanitization;

  /**
   * Default list of field names considered sensitive for log redaction.
   * Entries MUST be lowercase: `redactSensitiveFields` lowercases each key
   * and performs a substring match against this list, so a mixed-case entry
   * (e.g. "apiKey") would never match anything.
   * @private
   */
  private sensitiveFields: string[] = [
    "password",
    "token",
    "secret",
    "key",
    "apikey", // lowercase — matching is against lowercased keys
    "auth",
    "credential",
    "jwt",
    "ssn",
    "credit",
    "card",
    "cvv",
    "authorization",
  ];

  /**
   * Default configuration for HTML sanitization.
   * @private
   */
  private defaultHtmlSanitizeConfig: HtmlSanitizeConfig = {
    allowedTags: [
      "h1",
      "h2",
      "h3",
      "h4",
      "h5",
      "h6",
      "p",
      "a",
      "ul",
      "ol",
      "li",
      "b",
      "i",
      "strong",
      "em",
      "strike",
      "code",
      "hr",
      "br",
      "div",
      "table",
      "thead",
      "tbody",
      "tr",
      "th",
      "td",
      "pre",
    ],
    allowedAttributes: {
      a: ["href", "name", "target"],
      img: ["src", "alt", "title", "width", "height"],
      "*": ["class", "id", "style"],
    },
    preserveComments: false,
  };

  /** @private */
  private constructor() {}

  /**
   * Retrieves the singleton instance of the `Sanitization` class.
   * @returns The singleton `Sanitization` instance.
   */
  public static getInstance(): Sanitization {
    if (!Sanitization.instance) {
      Sanitization.instance = new Sanitization();
    }
    return Sanitization.instance;
  }

  /**
   * Sets or extends the list of sensitive field names for log sanitization.
   * New entries are lowercased so they match the case-insensitive comparison
   * in `redactSensitiveFields`.
   * @param fields - An array of field names to add to the sensitive list.
   */
  public setSensitiveFields(fields: string[]): void {
    this.sensitiveFields = [
      ...new Set([
        ...this.sensitiveFields,
        ...fields.map((f) => f.toLowerCase()),
      ]),
    ];
    const logContext = requestContextService.createRequestContext({
      operation: "Sanitization.setSensitiveFields",
      newSensitiveFieldCount: this.sensitiveFields.length,
    });
    logger.debug(
      "Updated sensitive fields list for log sanitization",
      logContext,
    );
  }

  /**
   * Gets a copy of the current list of sensitive field names.
   * @returns An array of sensitive field names.
   */
  public getSensitiveFields(): string[] {
    return [...this.sensitiveFields];
  }

  /**
   * Sanitizes an HTML string by removing potentially malicious tags and attributes.
   * @param input - The HTML string to sanitize.
   * @param config - Optional custom configuration for `sanitize-html`.
   * @returns The sanitized HTML string. Returns an empty string if input is falsy.
   */
  public sanitizeHtml(input: string, config?: HtmlSanitizeConfig): string {
    if (!input) return "";
    const effectiveConfig = { ...this.defaultHtmlSanitizeConfig, ...config };
    const options: sanitizeHtml.IOptions = {
      allowedTags: effectiveConfig.allowedTags,
      allowedAttributes: effectiveConfig.allowedAttributes,
      transformTags: effectiveConfig.transformTags,
    };
    if (effectiveConfig.preserveComments) {
      // sanitize-html treats "!--" as the pseudo-tag enabling comment retention.
      options.allowedTags = [...(options.allowedTags || []), "!--"];
    }
    return sanitizeHtml(input, options);
  }

  /**
   * Sanitizes a string based on its intended context (e.g., HTML, URL, text).
   * **Important:** `context: 'javascript'` is disallowed due to security risks.
   *
   * @param input - The string to sanitize.
   * @param options - Options specifying the sanitization context.
   * @returns The sanitized string. Returns an empty string if input is falsy,
   *   or if a 'url' context input fails validation.
   * @throws {McpError} If `options.context` is 'javascript'.
   */
  public sanitizeString(
    input: string,
    options: SanitizeStringOptions = {},
  ): string {
    if (!input) return "";
    switch (options.context) {
      case "html":
        return this.sanitizeHtml(input, {
          allowedTags: options.allowedTags,
          allowedAttributes: options.allowedAttributes
            ? this.convertAttributesFormat(options.allowedAttributes)
            : undefined,
        });
      case "attribute":
        // Strip all markup; the result is safe to embed in an attribute value.
        return sanitizeHtml(input, { allowedTags: [], allowedAttributes: {} });
      case "url":
        if (
          !validator.isURL(input, {
            protocols: ["http", "https"],
            require_protocol: true,
            require_host: true,
          })
        ) {
          logger.warning(
            "Potentially invalid URL detected during string sanitization (context: url)",
            requestContextService.createRequestContext({
              operation: "Sanitization.sanitizeString.urlWarning",
              invalidUrlAttempt: input,
            }),
          );
          return "";
        }
        return validator.trim(input);
      case "javascript":
        logger.error(
          "Attempted JavaScript sanitization via sanitizeString, which is disallowed.",
          requestContextService.createRequestContext({
            operation: "Sanitization.sanitizeString.jsAttempt",
            inputSnippet: input.substring(0, 50),
          }),
        );
        throw new McpError(
          BaseErrorCode.VALIDATION_ERROR,
          "JavaScript sanitization is not supported through sanitizeString due to security risks.",
        );
      case "text":
      default:
        // Default to plain text: strip all tags and attributes.
        return sanitizeHtml(input, { allowedTags: [], allowedAttributes: {} });
    }
  }

  /**
   * Converts attribute format for `sanitizeHtml`.
   * The `{ tagName: ['attr1'] }` shape is already structurally compatible with
   * `sanitize-html`'s `allowedAttributes`; this exists as a typed adapter point.
   * @param attrs - Attributes in `{ tagName: ['attr1'] }` format.
   * @returns Attributes in the `sanitize-html` expected format.
   * @private
   */
  private convertAttributesFormat(
    attrs: Record<string, string[]>,
  ): sanitizeHtml.IOptions["allowedAttributes"] {
    return attrs;
  }

  /**
   * Sanitizes a URL string by validating its format and protocol.
   * @param input - The URL string to sanitize.
   * @param allowedProtocols - Array of allowed URL protocols. Default: `['http', 'https']`.
   * @returns The sanitized and trimmed URL string.
   * @throws {McpError} If the URL is invalid or uses a disallowed protocol.
   */
  public sanitizeUrl(
    input: string,
    allowedProtocols: string[] = ["http", "https"],
  ): string {
    try {
      const trimmedInput = input.trim();
      if (
        !validator.isURL(trimmedInput, {
          protocols: allowedProtocols,
          require_protocol: true,
          require_host: true,
        })
      ) {
        throw new Error("Invalid URL format or protocol not in allowed list.");
      }
      // Defense-in-depth: reject javascript: even if a caller whitelists it.
      if (trimmedInput.toLowerCase().startsWith("javascript:")) {
        throw new Error("JavaScript pseudo-protocol is not allowed in URLs.");
      }
      return trimmedInput;
    } catch (error) {
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        error instanceof Error
          ? error.message
          : "Invalid or unsafe URL provided.",
        { input },
      );
    }
  }

  /**
   * Sanitizes a file path to prevent path traversal and normalize format.
   * @param input - The file path string to sanitize.
   * @param options - Options to control sanitization behavior.
   * @returns An object with the sanitized path and sanitization metadata.
   * @throws {McpError} If the path is invalid or unsafe.
   */
  public sanitizePath(
    input: string,
    options: PathSanitizeOptions = {},
  ): SanitizedPathInfo {
    const originalInput = input;
    const effectiveOptions: PathSanitizeOptions = {
      toPosix: options.toPosix ?? false,
      allowAbsolute: options.allowAbsolute ?? false,
      rootDir: options.rootDir ? path.resolve(options.rootDir) : undefined,
    };
    let wasAbsoluteInitially = false;
    let convertedToRelative = false;
    try {
      if (!input || typeof input !== "string")
        throw new Error("Invalid path input: must be a non-empty string.");
      // Null bytes can truncate paths in native APIs; reject outright.
      if (input.includes("\0"))
        throw new Error("Path contains null byte, which is disallowed.");

      let normalized = path.normalize(input);
      wasAbsoluteInitially = path.isAbsolute(normalized);
      if (effectiveOptions.toPosix) {
        normalized = normalized.replace(/\\/g, "/");
      }

      let finalSanitizedPath: string;
      if (effectiveOptions.rootDir) {
        const fullPath = path.resolve(effectiveOptions.rootDir, normalized);
        // The resolved path must be the root itself or live under it.
        if (
          !fullPath.startsWith(effectiveOptions.rootDir + path.sep) &&
          fullPath !== effectiveOptions.rootDir
        ) {
          throw new Error(
            "Path traversal detected: attempts to escape the defined root directory.",
          );
        }
        finalSanitizedPath = path.relative(effectiveOptions.rootDir, fullPath);
        finalSanitizedPath =
          finalSanitizedPath === "" ? "." : finalSanitizedPath;
        if (
          path.isAbsolute(finalSanitizedPath) &&
          !effectiveOptions.allowAbsolute
        ) {
          throw new Error(
            "Path resolved to absolute outside root when absolute paths are disallowed.",
          );
        }
      } else {
        if (path.isAbsolute(normalized)) {
          if (!effectiveOptions.allowAbsolute) {
            // Strip the leading drive letter and/or slashes to force relative.
            finalSanitizedPath = normalized.replace(
              /^(?:[A-Za-z]:)?[/\\]+/,
              "",
            );
            convertedToRelative = true;
          } else {
            finalSanitizedPath = normalized;
          }
        } else {
          // No rootDir given: treat the CWD as the containment boundary.
          const resolvedAgainstCwd = path.resolve(normalized);
          const currentWorkingDir = path.resolve(".");
          if (
            !resolvedAgainstCwd.startsWith(currentWorkingDir + path.sep) &&
            resolvedAgainstCwd !== currentWorkingDir
          ) {
            throw new Error(
              "Relative path traversal detected (escapes current working directory context).",
            );
          }
          finalSanitizedPath = normalized;
        }
      }

      return {
        sanitizedPath: finalSanitizedPath,
        originalInput,
        wasAbsolute: wasAbsoluteInitially,
        convertedToRelative:
          wasAbsoluteInitially &&
          !path.isAbsolute(finalSanitizedPath) &&
          !effectiveOptions.allowAbsolute,
        optionsUsed: effectiveOptions,
      };
    } catch (error) {
      logger.warning(
        "Path sanitization error",
        requestContextService.createRequestContext({
          operation: "Sanitization.sanitizePath.error",
          originalPathInput: originalInput,
          pathOptionsUsed: effectiveOptions,
          errorMessage: error instanceof Error ? error.message : String(error),
        }),
      );
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        error instanceof Error
          ? error.message
          : "Invalid or unsafe path provided.",
        { input: originalInput },
      );
    }
  }

  /**
   * Sanitizes a JSON string by parsing it to validate its format.
   * Optionally checks if the JSON string exceeds a maximum allowed size.
   * @template T The expected type of the parsed JSON object. Defaults to `unknown`.
   * @param input - The JSON string to sanitize/validate.
   * @param maxSize - Optional maximum allowed size of the JSON string in bytes.
   * @returns The parsed JavaScript object.
   * @throws {McpError} If input is not a string, too large, or invalid JSON.
   */
  public sanitizeJson<T = unknown>(input: string, maxSize?: number): T {
    try {
      if (typeof input !== "string")
        throw new Error("Invalid input: expected a JSON string.");
      if (maxSize !== undefined && Buffer.byteLength(input, "utf8") > maxSize) {
        throw new McpError(
          BaseErrorCode.VALIDATION_ERROR,
          `JSON string exceeds maximum allowed size of ${maxSize} bytes.`,
          { actualSize: Buffer.byteLength(input, "utf8"), maxSize },
        );
      }
      return JSON.parse(input) as T;
    } catch (error) {
      if (error instanceof McpError) throw error;
      // Guard the preview: `input` may not be a string on the type-check
      // failure path, and calling .length/.substring on it could itself throw.
      const inputPreview =
        typeof input === "string"
          ? input.length > 100
            ? `${input.substring(0, 100)}...`
            : input
          : String(input);
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        error instanceof Error ? error.message : "Invalid JSON format.",
        { inputPreview },
      );
    }
  }

  /**
   * Validates and sanitizes a numeric input, converting strings to numbers.
   * Clamps the number to `min`/`max` if provided.
   * @param input - The number or string to validate and sanitize.
   * @param min - Minimum allowed value (inclusive).
   * @param max - Maximum allowed value (inclusive).
   * @returns The sanitized (and potentially clamped) number.
   * @throws {McpError} If input is not a valid number, NaN, or Infinity.
   */
  public sanitizeNumber(
    input: number | string,
    min?: number,
    max?: number,
  ): number {
    let value: number;
    if (typeof input === "string") {
      const trimmedInput = input.trim();
      if (trimmedInput === "" || !validator.isNumeric(trimmedInput)) {
        throw new McpError(
          BaseErrorCode.VALIDATION_ERROR,
          "Invalid number format: input is empty or not numeric.",
          { input },
        );
      }
      value = parseFloat(trimmedInput);
    } else if (typeof input === "number") {
      value = input;
    } else {
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        "Invalid input type: expected number or string.",
        { input: String(input) },
      );
    }
    if (isNaN(value) || !isFinite(value)) {
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        "Invalid number value (NaN or Infinity).",
        { input },
      );
    }
    let clamped = false;
    const originalValueForLog = value; // never reassigned; captured for the clamp log
    if (min !== undefined && value < min) {
      value = min;
      clamped = true;
    }
    if (max !== undefined && value > max) {
      value = max;
      clamped = true;
    }
    if (clamped) {
      logger.debug(
        "Number clamped to range.",
        requestContextService.createRequestContext({
          operation: "Sanitization.sanitizeNumber.clamped",
          originalInput: String(input),
          parsedValue: originalValueForLog,
          minValue: min,
          maxValue: max,
          clampedValue: value,
        }),
      );
    }
    return value;
  }

  /**
   * Sanitizes input for logging by redacting sensitive fields.
   * Creates a deep clone and replaces values of fields matching `this.sensitiveFields`
   * (case-insensitive substring match) with "[REDACTED]".
   * @param input - The input data to sanitize for logging.
   * @returns A sanitized (deep cloned) version of the input, safe for logging.
   *   Returns original input if not object/array, or "[Log Sanitization Failed]" on error.
   */
  public sanitizeForLogging(input: unknown): unknown {
    try {
      if (!input || typeof input !== "object") return input;
      // Prefer structuredClone (handles cycles); fall back to JSON round-trip.
      const clonedInput =
        typeof structuredClone === "function"
          ? structuredClone(input)
          : JSON.parse(JSON.stringify(input));
      this.redactSensitiveFields(clonedInput);
      return clonedInput;
    } catch (error) {
      logger.error(
        "Error during log sanitization, returning placeholder.",
        requestContextService.createRequestContext({
          operation: "Sanitization.sanitizeForLogging.error",
          errorMessage: error instanceof Error ? error.message : String(error),
        }),
      );
      return "[Log Sanitization Failed]";
    }
  }

  /**
   * Recursively redacts sensitive fields in an object or array in place.
   * A key is sensitive when its lowercased form contains any entry of
   * `this.sensitiveFields` as a substring.
   * @param obj - The object or array to redact.
   * @private
   */
  private redactSensitiveFields(obj: unknown): void {
    if (!obj || typeof obj !== "object") return;
    if (Array.isArray(obj)) {
      obj.forEach((item) => this.redactSensitiveFields(item));
      return;
    }
    for (const key in obj) {
      if (Object.prototype.hasOwnProperty.call(obj, key)) {
        const value = (obj as Record<string, unknown>)[key];
        const lowerKey = key.toLowerCase();
        const isSensitive = this.sensitiveFields.some((field) =>
          lowerKey.includes(field),
        );
        if (isSensitive) {
          (obj as Record<string, unknown>)[key] = "[REDACTED]";
        } else if (value && typeof value === "object") {
          // Recurse into nested objects/arrays to redact deep matches.
          this.redactSensitiveFields(value);
        }
      }
    }
  }
}
/**
 * Singleton instance of the `Sanitization` class.
 * Use this shared instance for all input sanitization tasks so that
 * `setSensitiveFields` updates are seen by every caller.
 */
export const sanitization = Sanitization.getInstance();
/**
 * Convenience wrapper around `sanitization.sanitizeForLogging`.
 * @param input - The input data to sanitize.
 * @returns A sanitized version of the input, safe for logging.
 */
export const sanitizeInputForLogging = (input: unknown): unknown => {
  return sanitization.sanitizeForLogging(input);
};
```
--------------------------------------------------------------------------------
/examples/backup-example/knowledges.json:
--------------------------------------------------------------------------------
```json
[
{
"createdAt": "2025-03-26T18:42:38.878Z",
"citations": [
"https://github.com/cyanheads/datasync",
"https://datasync.dev/docs",
"https://community.datasync.dev",
"https://hub.docker.com/r/cyanheads/datasync"
],
"domain": "technical",
"id": "know_8edd2c00340b4959b5dd7bd493484a78",
"text": "# Project: DataSync - Open Source ETL Tool\n\n## Overview\nDataSync is an open-source Extract, Transform, Load (ETL) tool designed to simplify data integration between disparate systems. The project focuses on providing an intuitive UI for designing data pipelines while maintaining the flexibility and power required for complex enterprise scenarios. As the lead contributor and maintainer, cyanheads built this tool to address the gap between expensive enterprise ETL solutions and limited open-source alternatives.\n\n## Technologies Used\n- **Frontend:** React.js, Redux, Flow-based programming UI (custom-built)\n- **Backend:** Node.js, Express, Bull for job queuing\n- **Database:** MongoDB for configuration, plugin system for source/destination connectors\n- **Processing Engine:** Custom parallel processing engine written in Rust for performance\n- **Monitoring:** Prometheus, Grafana dashboards\n- **Testing:** Jest, Supertest, Property-based testing\n\n## Key Features\n- Visual pipeline designer with drag-and-drop interface\n- Pre-built connectors for 30+ common data sources and destinations\n- Custom transformation logic using JavaScript or Python\n- Real-time monitoring and alerting for data pipeline health\n- Fault-tolerance with automatic retry mechanisms\n- Extensive logging and audit trails\n- Horizontal scaling capabilities for high-volume workloads\n- REST API for programmatic control\n\n## Challenges and Solutions\n- **Challenge:** Creating an intuitive UI for complex data transformation logic.\n **Solution:** Developed a custom visual programming interface that abstracts complex operations while still allowing for detailed customization when needed.\n\n- **Challenge:** Handling diverse data formats and schemas efficiently.\n **Solution:** Implemented a unified data model internally with automatic schema mapping and validation.\n\n- **Challenge:** Ensuring reliability for mission-critical data pipelines.\n **Solution:** Built comprehensive error 
handling, checkpointing, and recovery mechanisms with detailed observability.\n\n## Results\n- Over 5,000 GitHub stars and 170+ contributors from the community\n- 2,000+ active installations across organizations of all sizes\n- Powers data pipelines processing over 50TB of data daily in production environments\n- Featured in \"Top Data Tools of 2024\" by Data Engineering Weekly\n- Regular contributions from developers at major tech companies\n\n## Links\n- [GitHub Repository](https://github.com/cyanheads/datasync)\n- [Documentation](https://datasync.dev/docs)\n- [Community Forum](https://community.datasync.dev)\n- [Docker Hub](https://hub.docker.com/r/cyanheads/datasync)",
"projectId": "portfolio-main",
"tags": [
"project",
"open source",
"ETL",
"data engineering",
"Node.js",
"React",
"Rust"
],
"updatedAt": "2025-03-26T18:42:38.878Z"
},
{
"createdAt": "2025-03-26T18:42:21.219Z",
"citations": [
"https://github.com/cyanheads/codementor-platform",
"https://codementor.dev",
"https://medium.com/@cyanheads/building-an-interactive-learning-platform-7f3e9a2b8d12"
],
"domain": "technical",
"id": "know_ea8b3222e36d4e26b73a0d865312413f",
"text": "# Project: CodeMentor Learning Platform\n\n## Overview\nCodeMentor is an interactive learning platform designed to help aspiring developers master programming skills through hands-on projects, real-time code evaluation, and personalized learning paths. The platform bridges the gap between theoretical knowledge and practical application by simulating real-world development scenarios.\n\n## Technologies Used\n- **Frontend:** Vue.js, Nuxt.js, TypeScript, Monaco Editor (VS Code-based)\n- **Backend:** Go, GraphQL, WebSockets for real-time features\n- **Database:** PostgreSQL, Redis for caching\n- **DevOps:** Docker, Kubernetes, GitHub Actions\n- **Cloud Infrastructure:** Google Cloud Platform (GCP)\n- **Testing:** Jest, Cypress, Go testing framework\n- **Code Execution:** Secure containerized execution environment\n\n## Key Features\n- Interactive code editor with real-time syntax checking and suggestions\n- Automated assessment and feedback on code quality and performance\n- Project-based learning paths with progressive difficulty levels\n- AI-powered personalized learning recommendations\n- Code challenges with time constraints and leaderboards\n- Mentor matching system for 1-on-1 guidance\n- Collaborative coding rooms for pair programming\n\n## Challenges and Solutions\n- **Challenge:** Creating a secure environment for executing untrusted user code.\n **Solution:** Implemented a sandboxed containerization system with strict resource limits and security policies that isolates each code execution instance.\n\n- **Challenge:** Providing real-time feedback for multiple programming languages.\n **Solution:** Developed a microservice architecture where language-specific services analyze code and provide instant feedback through WebSocket connections.\n\n- **Challenge:** Scaling during usage spikes without performance degradation.\n **Solution:** Implemented auto-scaling clusters on GCP with intelligent caching strategies and resource allocation based on usage 
patterns.\n\n## Results\n- Platform serves over 50,000 active monthly users\n- Users experience an average 40% faster learning progression compared to traditional methods\n- 78% completion rate for learning paths (industry average is ~15%)\n- Winner of 'Best Educational Tech Platform' at Seattle Tech Awards 2024\n- Successfully partnered with 15 tech companies for recruitment opportunities\n\n## Links\n- [GitHub Repository](https://github.com/cyanheads/codementor-platform)\n- [Live Platform](https://codementor.dev)\n- [Case Study](https://medium.com/@cyanheads/building-an-interactive-learning-platform-7f3e9a2b8d12)",
"projectId": "portfolio-main",
"tags": [
"project",
"education",
"learning platform",
"Vue.js",
"Go",
"interactive coding"
],
"updatedAt": "2025-03-26T18:42:21.219Z"
},
{
"createdAt": "2025-03-26T18:42:55.013Z",
"citations": [
"https://github.com/cyanheads/neurochat",
"https://neurochat.app",
"https://doi.org/10.1145/3581941.3581954"
],
"domain": "technical",
"id": "know_1d9b88ea4f2f448c926382e7d899a27c",
"text": "# Project: NeuroChat AI Assistant\n\n## Overview\nNeuroChat is an AI-powered conversational assistant designed specifically for neurodivergent individuals, focusing on clear communication, routine management, and emotional support. The application uses natural language processing to provide a patient, unambiguous interface that adapts to the user's communication preferences and cognitive needs.\n\n## Technologies Used\n- **Frontend:** React, Typescript, Progressive Web App (PWA)\n- **Backend:** Python, FastAPI, WebSockets\n- **AI/ML:** PyTorch, Hugging Face Transformers, Custom NLP models\n- **Database:** MongoDB for user data, Pinecone for vector embeddings\n- **Infrastructure:** AWS (ECS, Lambda, SageMaker)\n- **Security:** End-to-end encryption, SOC 2 compliance\n- **Accessibility:** WCAG 2.1 AAA compliance, screen reader optimization\n\n## Key Features\n- Adaptive conversation style based on user preferences and cognitive profile\n- Visual schedule and routine management with customizable reminders\n- Emotional intelligence features for recognizing user states and providing appropriate support\n- Simplified explanation mode for complex topics with optional visual aids\n- Sensory overload detection and calming interface transitions\n- Privacy-focused design with local processing for sensitive data\n- Customizable UI with sensory-friendly themes and layouts\n\n## Challenges and Solutions\n- **Challenge:** Creating AI models that truly understand and accommodate neurodivergent communication patterns.\n **Solution:** Collaborated with neurodivergent consultants and collected diverse training data, then fine-tuned models specifically for different cognitive profiles.\n\n- **Challenge:** Balancing feature richness with simplicity and clarity of interface.\n **Solution:** Implemented progressive disclosure UI patterns and user-controlled complexity levels that adapt to individual preferences and cognitive load.\n\n- **Challenge:** Ensuring the app remains 
helpful without becoming overwhelming.\n **Solution:** Developed an attention-aware interface that monitors signs of cognitive load and adjusts information density and interaction patterns accordingly.\n\n## Results\n- User base of 35,000+ active monthly users\n- Average session time of 18 minutes, indicating strong engagement\n- 92% of users report improved daily functioning and reduced anxiety\n- Winner of the 'Tech for Good' award at the Inclusive Innovation Summit\n- Featured in psychology and technology publications for pioneering approach\n\n## Links\n- [GitHub Repository](https://github.com/cyanheads/neurochat)\n- [Web Application](https://neurochat.app)\n- [Research Paper](https://doi.org/10.1145/3581941.3581954)",
"projectId": "portfolio-main",
"tags": [
"project",
"AI",
"accessibility",
"neurodiversity",
"Python",
"React",
"NLP"
],
"updatedAt": "2025-03-26T18:42:55.013Z"
},
{
"createdAt": "2025-03-26T18:43:13.677Z",
"citations": [],
"domain": "technical",
"id": "know_5894c5e7cd674206b82470c4d46265e7",
"text": "# About Cyanheads: Development Philosophy and Personal Approach\n\n## Development Philosophy\n\nAs a software developer, I approach each project with a blend of pragmatism and innovation. I believe that great software strikes a balance between elegant technical solutions and genuine user needs. My development philosophy centers around these core principles:\n\n### User-Centered Design\nTechnology should serve people, not the other way around. I start every project by deeply understanding the end users and their needs, then work backward to create solutions that feel intuitive and empowering.\n\n### Sustainable Code\nI write code that's meant to last. This means emphasizing readability, thorough documentation, comprehensive testing, and thoughtful architecture that can evolve over time without accumulating technical debt.\n\n### Continuous Learning\nThe tech landscape evolves rapidly, and I embrace this by dedicating time each week to learning new tools, languages, and approaches. I believe that being a great developer means being a perpetual student.\n\n### Ethical Technology\nI'm committed to building technology that respects user privacy, promotes accessibility, and considers potential social impacts. I believe that ethical considerations should be built into the development process from day one.\n\n## Personal Interests\n\nBeyond coding, I'm passionate about several interests that inform my work and keep me balanced:\n\n### Open Source Contribution\nI'm an active contributor to several open source projects, particularly in the data visualization and accessibility spaces. I believe in giving back to the community that has provided so many tools and opportunities.\n\n### Hiking and Outdoor Photography\nLiving in Seattle provides amazing access to nature. 
I frequently explore the hiking trails around the Pacific Northwest, capturing landscape photography that helps me maintain perspective and creativity.\n\n### Electronic Music Production\nAs a hobby producer, I create ambient and electronic music that exercises the creative parts of my brain in different ways than coding. The logical patterns of music production have surprising parallels to software architecture.\n\n### Tech Mentorship\nI regularly mentor coding bootcamp graduates and early-career developers. Helping others navigate their tech journey is incredibly rewarding and helps me continually reassess and articulate my own understanding.\n\n### Language Learning\nI'm currently learning Japanese and have intermediate proficiency in Spanish. I find that understanding different languages improves my ability to think systematically and approach problems from multiple angles.\n\n## Work-Life Integration\n\nRather than pursuing traditional work-life balance, I prefer work-life integration where my professional skills, personal interests, and values form a cohesive whole. This approach keeps me energized, prevents burnout, and allows me to bring my authentic self to both my personal projects and professional work.",
"projectId": "portfolio-main",
"tags": ["personal", "philosophy", "interests", "values", "approach"],
"updatedAt": "2025-03-26T18:43:13.677Z"
},
{
"createdAt": "2025-03-26T18:41:29.437Z",
"citations": [],
"domain": "technical",
"id": "know_c9ebf9d01d0841b28fbdfc508a3754ef",
"text": "# Professional Background for cyanheads\n\nCyanheads (Alex Chen) is a full-stack software developer based in Seattle, WA with 7 years of professional experience in the tech industry. They specialize in building scalable web applications and cloud-native solutions with a focus on excellent user experience and performance.\n\n## Career History\n\n### Senior Software Engineer at Nimbus Cloud Solutions (2023-Present)\n- Lead developer for the company's flagship cloud management platform\n- Architected and implemented microservices infrastructure using Kubernetes and Docker\n- Mentored junior developers and established coding standards and best practices\n- Technologies: TypeScript, React, Node.js, PostgreSQL, Kubernetes, AWS\n\n### Software Engineer at TechForward (2020-2023)\n- Developed and maintained multiple client-facing web applications\n- Implemented CI/CD pipelines that reduced deployment time by 70%\n- Collaborated with design and product teams to create intuitive user interfaces\n- Technologies: JavaScript, React, Express, MongoDB, GitHub Actions, Azure\n\n### Frontend Developer at InnovateSoft (2018-2020)\n- Created responsive web interfaces for enterprise clients\n- Refactored legacy codebase to modern standards, improving performance by 40%\n- Implemented comprehensive unit and integration test suites\n- Technologies: JavaScript, Vue.js, CSS/SCSS, Jest\n\n## Education\n\n- Bachelor of Science in Computer Science, University of Washington (2014-2018)\n- AWS Certified Solutions Architect - Associate\n- MongoDB Certified Developer",
"projectId": "portfolio-main",
"tags": ["bio", "professional background", "career", "education"],
"updatedAt": "2025-03-26T18:41:29.437Z"
},
{
"createdAt": "2025-03-26T18:41:39.858Z",
"citations": [],
"domain": "technical",
"id": "know_111b5c3bbc3d416883aab006ac1b8f2c",
"text": "# Technical Skills Profile\n\n## Programming Languages\n- **Advanced:** JavaScript/TypeScript, Python, HTML/CSS\n- **Intermediate:** Go, SQL, Rust, Bash/Shell\n- **Familiar:** C++, Java, Ruby\n\n## Frontend Technologies\n- **Frameworks/Libraries:** React.js, Vue.js, Next.js, Svelte\n- **State Management:** Redux, Zustand, Pinia, Context API\n- **Styling:** Tailwind CSS, styled-components, SCSS/SASS, CSS Modules\n- **Testing:** Jest, React Testing Library, Cypress\n\n## Backend Technologies\n- **Frameworks/Libraries:** Node.js (Express, Nest.js), FastAPI, Django\n- **Databases:** PostgreSQL, MongoDB, Redis, MySQL, DynamoDB\n- **API Design:** RESTful APIs, GraphQL, gRPC, Swagger/OpenAPI\n- **Authentication:** OAuth 2.0, JWT, Auth0, Firebase Auth\n\n## DevOps & Cloud\n- **Cloud Platforms:** AWS (Certified Associate), Azure, Google Cloud Platform\n- **Infrastructure as Code:** Terraform, CloudFormation, Pulumi\n- **CI/CD:** GitHub Actions, Jenkins, CircleCI, ArgoCD\n- **Containerization:** Docker, Kubernetes, Docker Compose\n- **Monitoring:** Prometheus, Grafana, New Relic, AWS CloudWatch\n\n## Tools & Methodologies\n- **Version Control:** Git, GitHub, GitLab, Bitbucket\n- **Project Management:** Agile, Scrum, Kanban, Jira, Trello\n- **Design:** Figma, Adobe XD, Responsive Design principles\n- **Testing Methodologies:** TDD, BDD, Unit/Integration Testing\n\n## Other Skills\n- Web Performance Optimization\n- Serverless Architecture\n- JAMstack Development\n- Progressive Web Apps (PWAs)\n- Accessibility (WCAG) implementation\n- SEO best practices",
"projectId": "portfolio-main",
"tags": [
"skills",
"technical capabilities",
"programming",
"tools",
"frameworks"
],
"updatedAt": "2025-03-26T18:41:39.858Z"
},
{
"createdAt": "2025-03-26T18:41:52.619Z",
"citations": [
"https://github.com/cyanheads/cloudnative-dashboard",
"https://demo.cloudnative-dashboard.com"
],
"domain": "technical",
"id": "know_b3881c98f99c41a0992686aee015ad57",
"text": "# Project: CloudNative Dashboard\n\n## Overview\nA comprehensive cloud resource management platform that helps DevOps teams monitor and optimize their infrastructure across multiple cloud providers. The dashboard provides real-time analytics, cost optimization recommendations, and automated scaling capabilities.\n\n## Technologies Used\n- **Frontend:** React.js, TypeScript, D3.js for data visualization, Tailwind CSS\n- **Backend:** Node.js, Express, GraphQL API\n- **Database:** PostgreSQL, Redis for caching\n- **Infrastructure:** AWS (EC2, Lambda, S3, DynamoDB)\n- **Monitoring:** Prometheus, Grafana\n- **CI/CD:** GitHub Actions, Docker\n\n## Key Features\n- Multi-cloud provider integration (AWS, Azure, GCP)\n- Real-time resource utilization monitoring with customizable alerts\n- Cost forecasting and anomaly detection\n- Infrastructure as Code templates generation\n- Role-based access control system\n- Automated scaling policies based on custom metrics\n\n## Challenges and Solutions\n- **Challenge:** Handling large volumes of real-time data from multiple cloud providers.\n **Solution:** Implemented a scalable event-driven architecture with message queues and workers to process data asynchronously.\n\n- **Challenge:** Creating intuitive visualizations for complex cloud resource relationships.\n **Solution:** Developed custom D3.js visualization components with interactive tooltips and drill-down capabilities.\n\n- **Challenge:** Maintaining consistent performance across different user scenarios.\n **Solution:** Implemented aggressive caching strategies and GraphQL query optimization to reduce response times.\n\n## Results\n- Reduced cloud infrastructure costs by 35% for pilot customers\n- Improved incident response time by 60% with real-time alerting\n- Successfully onboarded 12 enterprise clients within the first 6 months\n- Achieved 99.95% uptime for the platform\n\n## Links\n- [GitHub Repository](https://github.com/cyanheads/cloudnative-dashboard)\n- 
[Live Demo](https://demo.cloudnative-dashboard.com)",
"projectId": "portfolio-main",
"tags": [
"project",
"cloud",
"dashboard",
"AWS",
"React",
"Node.js",
"DevOps"
],
"updatedAt": "2025-03-26T18:41:52.619Z"
},
{
"createdAt": "2025-03-26T18:42:05.541Z",
"citations": [
"https://github.com/cyanheads/ecotrack-app",
"https://apps.apple.com/us/app/ecotrack",
"https://play.google.com/store/apps/details?id=com.cyanheads.ecotrack"
],
"domain": "technical",
"id": "know_24f1739b78e1430e90a89cfe6c208d63",
"text": "# Project: EcoTrack Mobile App\n\n## Overview\nEcoTrack is a mobile application that helps environmentally-conscious individuals track and reduce their carbon footprint through daily activities. The app combines gamification with practical sustainability tools to encourage users to adopt eco-friendly habits and make informed environmental choices.\n\n## Technologies Used\n- **Frontend:** React Native, TypeScript, Redux\n- **Backend:** Python, FastAPI, Celery\n- **Database:** MongoDB, PostgreSQL for analytics\n- **APIs:** Google Maps API, Carbon Footprint API, Weather API\n- **Authentication:** Firebase Auth, OAuth 2.0\n- **Analytics:** TensorFlow for usage pattern analysis\n- **CI/CD:** Bitbucket Pipelines, Fastlane\n\n## Key Features\n- Personalized carbon footprint calculator based on lifestyle choices\n- Transportation mode detection and eco-friendly route recommendations\n- Gamified challenges and community competitions\n- Local sustainable business directory\n- Detailed analytics and progress tracking\n- Social sharing and friend challenges\n- AR feature to visualize environmental impact\n\n## Challenges and Solutions\n- **Challenge:** Accurately determining carbon footprint from varied user activities.\n **Solution:** Developed a machine learning model trained on verified carbon emission datasets to provide accurate estimates based on user inputs and activity patterns.\n\n- **Challenge:** Battery drain from continuous location tracking.\n **Solution:** Implemented intelligent tracking algorithms that adjust frequency based on user movement patterns and battery levels.\n\n- **Challenge:** Making complex environmental data accessible and engaging.\n **Solution:** Created an intuitive visualization system with customizable dashboards and achievement-based progression.\n\n## Results\n- Over 100,000 downloads on iOS and Android platforms\n- Average user carbon footprint reduction of 15% after 3 months of use\n- Featured in \"Best Environmental Apps of 
2024\" by GreenTech Magazine\n- 4.7/5 star rating across app stores\n- Successfully raised $1.2M in seed funding based on app traction\n\n## Links\n- [GitHub Repository](https://github.com/cyanheads/ecotrack-app)\n- [App Store](https://apps.apple.com/us/app/ecotrack)\n- [Google Play Store](https://play.google.com/store/apps/details?id=com.cyanheads.ecotrack)",
"projectId": "portfolio-main",
"tags": [
"project",
"mobile app",
"React Native",
"Python",
"sustainability",
"carbon footprint"
],
"updatedAt": "2025-03-26T18:42:05.541Z"
}
]
```
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
```markdown
# Changelog
All notable changes to this project will be documented in this file.
## [2.8.15] - 2025-06-05
### Changed
- **Documentation**:
- Updated `README.md` formatting.
- Updated `.gitignore`.
- Updated `.dockerignore`.
- New version to fix NPM publish issues.
## [2.8.14] - 2025-06-05
### Changed
- **Dependencies**:
- Updated `@modelcontextprotocol/sdk` to `^1.12.1`.
- Updated `@types/node` to `^22.15.29`.
- Updated `ignore` to `^7.0.5`.
- Updated `node-cron` to `^4.1.0`.
- Updated `openai` to `^5.1.1`.
- Updated `zod` to `^3.25.51`.
- **Refactoring**:
- Updated `scripts/tree.ts` to use the `ignore` library for pattern matching and to compare existing tree content before overwriting `docs/tree.md`.
- Refined server instance handling and startup logic in `src/index.ts`, `src/mcp/server.ts`, `src/mcp/transports/httpTransport.ts`, and `src/mcp/transports/stdioTransport.ts`.
- Improved comments, logging, and JWT scope claim handling in `src/mcp/transports/authentication/authMiddleware.ts`.
- Enhanced `src/mcp/transports/httpTransport.ts` with rate limiting middleware and improved origin checking logic.
- Minor logging improvements in `src/utils/internal/logger.ts` regarding log directory handling and JSON stringification of BigInts.
- **Configuration**:
- Minor formatting update to `.ncurc.json`.
- **Web UI**:
- Minor HTML structural and class name adjustments in `src/webui/index.html` and related JavaScript files.
- **Documentation**:
- Updated `README.md` to reflect removal of LLM provider configurations and project structure entries. Version bumped to 2.8.14.
- Regenerated `docs/tree.md` to reflect file changes and new generation timestamp.
- **Version**: Bumped project version to 2.8.14 in `package.json` and `package-lock.json`.
### Removed
- **LLM Provider Framework**:
- Removed the LLM provider integration, including `llmFactory.ts`, `openRouterProvider.ts`, and the `src/services/llm-providers/` directory.
- Removed related LLM configuration options from `src/config/index.ts` (e.g., `OPENROUTER_API_KEY`, `GEMINI_API_KEY`, `LLM_DEFAULT_MODEL`).
- Removed `@google/genai` dependency.
## [2.8.13] - 2025-05-29
### Added
- **LLM Provider Framework**:
- Introduced `llmFactory.ts` in `src/services/llm-providers/` to manage and instantiate different LLM provider clients.
- Added support for Google Gemini models via the `@google/genai` SDK. This includes a new `GEMINI_API_KEY` in the configuration (`src/config/index.ts`).
- **Configuration**: Added `.ncurc.json` for `npm-check-updates` configuration.
### Changed
- **LLM Provider Refactoring**:
- Relocated the OpenRouter provider logic from `src/services/llm-providers/openRouterProvider.ts` to a dedicated subdirectory `src/services/llm-providers/openRouter/` (containing `openRouterProvider.ts` and `index.ts`).
- Updated the main LLM provider barrel file (`src/services/llm-providers/index.ts`) to export from the new `openRouter/` directory.
- **Dependencies**:
- Added `@google/genai` (v1.1.0) to `package.json` and `package-lock.json`.
- Updated `@types/node` to `^22.15.24`.
- Updated `zod` to `^3.25.33`.
- **Documentation**:
- Updated version in `README.md` to 2.8.13.
- Regenerated `docs/tree.md` to reflect new file structure and timestamp.
- **Version**: Bumped project version to 2.8.13 in `package.json` and `package-lock.json`.
### Removed
- Deleted the old `src/services/llm-providers/openRouterProvider.ts` as its contents were refactored into the `src/services/llm-providers/openRouter/` directory.
## [2.8.12] - 2025-05-28
### Changed
- **Web UI**:
- Major refactor of the Web UI, modularizing JavaScript into `src/webui/logic/` and CSS into `src/webui/styling/`.
- Updated `src/webui/index.html` to link to new modularized assets.
- Modified `package.json` `webui` script to use `npx serve src/webui -l 8000` for a local development server.
- **Neo4j Services Refactoring**:
- Moved `backupRestoreService.ts` into its own directory `src/services/neo4j/backupRestoreService/`.
- Moved `searchService.ts` into its own directory `src/services/neo4j/searchService/`.
- Updated all relevant import paths across the codebase to reflect these changes.
- **Database Scripts**:
- Relocated `db-backup.ts` and `db-import.ts` from the root `scripts/` directory to `src/services/neo4j/backupRestoreService/scripts/`.
- Updated `package.json` script paths for `db:backup` and `db:import` accordingly.
- **Documentation**:
- Updated `README.md` to reflect the new paths for database backup and restore scripts, and version bump to 2.8.12.
- Updated `docs/tree.md` to reflect the new file structure and generation timestamp.
- **Dependencies**: Updated `package-lock.json` and `package.json` (version bump to 2.8.12, script paths).
### Added
- **Web UI Features**:
- Added "Project Task Board" to `src/webui/index.html`.
- Added "Data Explorer" to `src/webui/index.html`.
- Added `CLAUDE.md`.
- Added `automated-tests/AGENT_TEST_05282025.md`.
## [2.8.11] - 2025-05-28
### Changed
- **Neo4j Services Refactoring**:
- Moved `backupRestoreService.ts` into its own directory `src/services/neo4j/backupRestoreService/`.
- Moved `searchService.ts` into its own directory `src/services/neo4j/searchService/`.
- Updated all relevant import paths across the codebase to reflect these changes.
- **Database Scripts**:
- Relocated `db-backup.ts` and `db-import.ts` from the root `scripts/` directory to `src/services/neo4j/backupRestoreService/scripts/`.
- Updated `package.json` script paths for `db:backup` and `db:import` accordingly.
- **Documentation**:
- Updated `README.md` to reflect the new paths for database backup and restore scripts.
- Updated `docs/tree.md` to reflect the new file structure and generation timestamp.
- **Dependencies**: Updated `package-lock.json` due to changes in `package.json` (script paths).
### Added
- Added `CLAUDE.md`.
- Added `automated-tests/AGENT_TEST_05282025.md`.
## [2.8.10] - 2025-05-28
### Fixed
- **Dependency**: Downgraded `chrono-node` from `^2.8.1` to `2.8.0` to resolve an issue.
### Changed
- **CLI**: Updated the binary command name from `atlas-mcp` to `atlas-mcp-server` in `package.json`.
- **Scripts**: Modified the `inspector` script in `package.json` to use the new `atlas-mcp-server` binary name.
### Documentation
- Updated MCP version badge in `README.md` to `1.12.0`.
- Updated application version to `2.8.10` in `README.md`.
## [2.8.9] - 2025-05-28
### Changed
- **Unified Search System**:
- Changed default fuzzy search behavior from `false` to `true` for more intuitive "contains" matching on specific properties
- Enhanced search routing logic to automatically choose between full-text and regex search based on property type and entity combination
- Improved property handling with separate parameters for Cypher queries vs internal logic checks for better maintainability
- **Database Performance**:
- Optimized relationship import performance in backup/restore service using UNWIND batch operations instead of individual queries (500 relationships per batch)
- Removed unused `knowledge_domain` index to reduce database overhead
- **Code Quality**:
- Extracted `escapeRelationshipType` helper function to shared `helpers.ts` for better code reuse
- Applied consistent formatting across multiple files (import organization, quote style, spacing)
### Documentation
- Updated `docs/tree.md` generation timestamp to reflect current state
- Applied consistent markdown formatting to CHANGELOG for better readability
- **Production Readiness Assessment**: Added comprehensive automated test report (`automated-tests/AGENT_TEST_05282025.md`) with 92/100 production readiness score, covering all 14 tools and 8 MCP resources with detailed testing results, error handling validation, and deployment recommendations
## [2.8.8] - 2025-05-22
### Added
- **LLM Integration**: Introduced a new service for LLM provider integrations, starting with OpenRouter support (`src/services/llm-providers/`).
- **Project Status**: Added "in-progress" as a valid status for projects in `atlas_project_list` tool types.
- **Error Codes**: Expanded `BaseErrorCode` enum in `src/types/errors.ts` with `SERVICE_UNAVAILABLE`, `CONFIGURATION_ERROR`, `INITIALIZATION_FAILED`, and `FORBIDDEN` for more granular error reporting.
### Changed
- **HTTP Transport**:
- Standardized error responses (404, 500) to JSON RPC format for better client interoperability.
- Improved origin checking logic for CORS requests, enhancing security and flexibility.
- Enhanced server address logging for production environments to correctly reflect HTTPS usage behind a reverse proxy.
- **Logging**:
- Refactored console transport configuration in `Logger` into a dedicated `_configureConsoleTransport` method for improved clarity and dynamic adjustment based on log level and TTY status.
- Stack traces included in MCP error notifications (debug mode) are now truncated to a maximum of 1024 characters.
- **Search Service**:
- Improved debug logging in `SearchService` to include `searchPropertyUsed` and `rawSearchValueParam` for better query traceability.
- **Project List Tool**:
- Refactored `atlas_project_list` tool's Zod schema to use `createProjectStatusEnum` for defining project status filters, promoting consistency.
### Fixed
- **Search Service**: Corrected Cypher query construction in `SearchService` by refining the `WITH` clause logic to ensure only previously defined variables are included.
- **Neo4j Helpers**: Ensured `assignedTo` filter parameter is correctly added to query parameters in `buildListQuery` helper function.
### Documentation
- Updated `docs/tree.md` to reflect the new `src/services/llm-providers/` directory and current generation timestamp.
- Updated `README.md` project structure diagram to include the new `llm-providers` service directory.
## [2.8.7] - 2025-05-22
### Changed
- **Development Tooling & Code Quality**:
- Refactored utility scripts in `scripts/` directory for improved clarity, error handling, and security (e.g., path resolution within project root). This includes `clean.ts`, `db-backup.ts`, `db-import.ts`, `fetch-openapi-spec.ts`, `make-executable.ts`, and `tree.ts`.
- Applied Prettier formatting across the entire codebase (`src/`, `examples/`, `scripts/`) for consistent styling.
- **Build & Configuration**:
- Added `tsconfig.typedoc.json` for dedicated TypeDoc generation settings.
- Updated `repomix.config.json` to adjust ignore patterns.
- Minor formatting adjustment in `tsconfig.json`.
- **Documentation**:
- Regenerated `docs/tree.md` to reflect the current project structure.
- Updated version badge in `README.md` to 2.8.7.
- **Search Service**:
- Refined tag-based search logic in `SearchService` to more robustly extract the core tag value from various regex patterns, improving search reliability.
- **Examples**: Minor formatting consistency updates to JSON files in `examples/backup-example/` and `examples/deep-research-example/`.
## [2.8.6] - 2025-05-22
### Added
- **Development Tooling**:
- Integrated `prettier` for consistent code formatting.
- Added a `format` script to `package.json` for running Prettier.
- Included `mcp.json` for MCP client configuration and inspection.
- **Configuration**:
- Introduced `LOGS_DIR` environment variable and configuration to specify the directory for log files, with validation to ensure it's within the project root.
- Added new optional environment variables and corresponding configurations in `src/config/index.ts` for:
- OpenRouter integration (`OPENROUTER_APP_URL`, `OPENROUTER_APP_NAME`, `OPENROUTER_API_KEY`).
- Default LLM parameters (`LLM_DEFAULT_MODEL`, `LLM_DEFAULT_TEMPERATURE`, `LLM_DEFAULT_TOP_P`, `LLM_DEFAULT_MAX_TOKENS`, `LLM_DEFAULT_TOP_K`, `LLM_DEFAULT_MIN_P`).
- OAuth Proxy settings (`OAUTH_PROXY_AUTHORIZATION_URL`, `OAUTH_PROXY_TOKEN_URL`, etc.).
- **Search**:
- Added `assignedToUserId` filter to `atlas_unified_search` tool and `SearchService` for targeted task searches when performing property-specific queries.
### Changed
- **Configuration System (`src/config/index.ts`)**:
- Major refactor to use Zod for robust environment variable validation, type safety, default values, and clearer error messages.
- Improved `package.json` reading for default server name and version.
- Generalized directory creation and validation logic (for `BACKUP_FILE_DIR` and new `LOGS_DIR`) into a reusable `ensureDirectory` function, enhancing security and robustness.
- **Search Functionality**:
- **`atlas_unified_search` Tool**:
- Behavior now differentiates:
- If `property` is specified: Performs a regex-based search on that specific property. `caseInsensitive` and `fuzzy` (for 'contains' matching) options apply.
- If `property` is omitted: Performs a full-text index search across default indexed fields. `fuzzy` option attempts a Lucene fuzzy query (e.g., `term~1`).
- Updated input schema (`types.ts`) and implementation (`unifiedSearch.ts`) to reflect this dual-mode logic and new filters.
- **`SearchService` (`src/services/neo4j/searchService.ts`)**:
- `search()` method (for property-specific search) updated to handle `assignedToUserId` filter for tasks and pass through `caseInsensitive` and `fuzzy` flags.
- `fullTextSearch()` method now executes full-text queries for each entity type (project, task, knowledge) sequentially, each within its own Neo4j session, to improve resource management and error isolation.
- Updated `SearchOptions` type in `src/services/neo4j/types.ts`.
- **Logging (`src/utils/internal/logger.ts`)**:
- Logger now directly uses the validated `config.logsPath` from the central configuration, ensuring logs are stored in the correctly specified and verified directory.
- **Build & Packaging (`package.json`, `package-lock.json`)**:
- Added `prettier` as a dev dependency.
- Updated `package.json` to include an `exports` field for better module resolution.
- Added `engines` field specifying Node.js version compatibility (>=16.0.0).
- Updated author information format.
- **Smithery Configuration (`smithery.yaml`)**:
- Added `LOGS_DIR` to environment variable definitions.
- **Documentation**:
- Updated `docs/tree.md` with the latest file structure and generation timestamp.
- `typedoc.json`: Included `scripts` directory in documentation generation and updated the project name for API docs.
### Fixed
- **Configuration Robustness**: Enhanced safety in `src/config/index.ts` by ensuring `package.json` path resolution stays within the project root and by providing more context in console messages (e.g., when `package.json` cannot be read, or directories cannot be created), especially when `stdout` is a TTY.
## [2.8.5] - 2025-05-22
### Changed
- **Logging & Error Handling**:
- Integrated `RequestContext` (from `src/utils/internal/requestContext.ts`) throughout the application, including all MCP tools, resources, and Neo4j services. This provides a unique `requestId` and `timestamp` for every operation, significantly improving log tracing and debugging capabilities.
- Refactored the `logger.ts` to properly handle `RequestContext` and to ensure that error objects are passed directly to logging methods (e.g., `logger.error("message", errorAsError, context)`).
- Updated `errorHandler.ts` to correctly utilize `RequestContext`, improve error detail consolidation, and ensure consistent logging of error metadata.
- Modified `idGenerator.ts` to remove internal logging calls that were causing circular dependencies with `requestContextService` during application startup.
- **Dependencies**: Updated various dependencies to their latest versions, including `@modelcontextprotocol/sdk` (to 1.11.5), `@types/node` (to 22.15.21), `node-cron` (to 4.0.6), `openai` (to 4.102.0), `zod` (to 3.25.20), and `@types/validator` (to 13.15.1).
- **README.md**: Removed the "Automatic Backups (Note)" section as this functionality was previously deprecated.
- **Version Bump**: Updated project version to `2.8.5` in `package.json`, `package-lock.json`, and `README.md`.
## [2.8.4] - 2025-05-21
### Added
- Enhanced Web UI with new features:
- **Task Flow Visualization**: Integrated Mermaid.js to display task dependencies as a flow chart.
- **View Toggles**: Added "Compact View" and "Detailed View" toggles for Tasks and Knowledge sections.
- **Improved Accessibility**: Enhanced HTML structure with ARIA attributes.
### Changed
- **Web UI Overhaul**:
- Refactored `src/webui/script.js` into modular components (`config`, `dom`, `state`, `utils`, `apiService`, `renderService`, `eventHandlers`) for better maintainability and readability.
- Redesigned `src/webui/style.css` for a modern minimalist aesthetic, including full dark mode support, improved responsiveness, and refined data presentation.
- Updated `src/webui/index.html` with new structural elements, Mermaid.js CDN, and accessibility improvements.
- **Documentation**: Updated `docs/tree.md` to reflect the latest directory structure and generation date.
- **Version Bump**: Updated project version to `2.8.4` in `package.json` and `README.md`.
## [2.8.3] - 2025-05-20
### Added
- Basic Web UI for interacting with the Atlas MCP server. Includes `index.html`, `script.js`, and `style.css` under `src/webui/`.
### Changed
- Updated `docs/tree.md` to reflect the new `src/webui/` directory and current generation date.
## [2.8.2] - 2025-05-19
### Changed
- Updated various dependencies including `@modelcontextprotocol/sdk`, `commander`, `openai`, and `zod`.
- Standardized `RequestContext` usage across the MCP server (`server.ts`) and transport layers (`authMiddleware.ts`, `httpTransport.ts`, `stdioTransport.ts`) for improved logging and request tracing.
- Aligned `req.auth` in `authMiddleware.ts` with the SDK's `AuthInfo` type and enhanced JWT claim extraction for `clientId` and `scopes`.
- Alphabetized tool registration imports in `src/mcp/server.ts` for better organization.
## [2.8.0] - 2025-05-11
- **Repository Alignment**: Updated project structure, dependencies, and development scripts to align with the latest version of the `mcp-ts-template` (https://github.com/cyanheads/mcp-ts-template), ensuring consistency with best practices and template enhancements.
### Added
- **HTTP Transport Support**: Implemented an alternative HTTP transport layer for the MCP server, allowing connections over HTTP in addition to the existing stdio transport. This includes basic authentication middleware.
- **Enhanced Configuration System**: Integrated Zod for environment variable validation, providing type safety and clear error reporting for server configuration. Extended configuration options for server identity, logging, transport, HTTP settings, authentication, and rate limiting.
- **New Utility Modules**: Introduced new utility modules for:
- `metrics`: Includes a `tokenCounter`.
- `parsing`: Includes `dateParser` and `jsonParser`.
- `security`: Includes `rateLimiter` and `sanitization` utilities.
- **Request Context Service**: Added a service for creating and managing request contexts, improving traceability and logging across operations.
- **Dependency Updates**:
- Added `chrono-node` for advanced date parsing.
- Added `openai`, `partial-json`, `sanitize-html`, `tiktoken` to support future AI and text processing capabilities.
- Added `validator` for input validation.
- **New Dependencies & Scripts**: Added `ignore`, `winston-daily-rotate-file`, `yargs` as core dependencies, and `axios`, `js-yaml`, `typedoc` as development dependencies. Introduced new npm scripts for enhanced development workflows, including `docs:generate`, `fetch-spec`, `inspector`, `start:http`, and `start:stdio`.
- **OpenAPI Spec Fetching**: Added a new script `scripts/fetch-openapi-spec.ts` to retrieve OpenAPI specifications, likely for documentation or client generation.
- **Raw JSON for Unified Search**: The `atlas_unified_search` tool can now return raw JSON responses when specified, offering more flexibility for programmatic consumption.
- **Development Tooling**: Added `smithery.yaml`
### Changed
- **README Updates**: Improved clarity, accuracy, and formatting of the README.md. Updated tool descriptions, Neo4j setup instructions, and environment variable explanations.
- **Scripts & Configuration**:
- Updated `db:backup` and `db:import` scripts in `package.json` to use `node --loader ts-node/esm`.
- Standardized logger imports in database scripts to use barrel files.
- Enhanced error logging in `db-import.ts`.
- Changed default `BACKUP_FILE_DIR` in `src/config/index.ts` to `./atlas-backups` and corrected `fs` import order.
- Updated `repomix.config.json` to ignore `.clinerules` and ensure a trailing newline.
- **Project Version**: Bumped version from 2.7.3 to 2.8.0.
- **Dependency Updates**:
- Updated `@modelcontextprotocol/sdk` to `^1.11.1`.
- Updated `@types/node` to `^22.15.17`.
- Updated `node-cron` to `^4.0.3`.
- **Server Core Refactor**: Significantly refactored the main server startup (`src/index.ts`) and MCP server logic (`src/mcp/server.ts`) for better modularity, improved error handling, and to support multiple transport types.
- **Utilities Refactor**: Reorganized the `src/utils` directory into a more modular structure with subdirectories for `internal`, `metrics`, `parsing`, and `security`. Legacy top-level utility files were removed or relocated.
- **Internal Import Paths**: Updated internal import paths for logger, `ErrorHandler`, `McpError`, and `BaseErrorCode` across multiple service and utility files to align with the refactored `src/utils` structure and `src/types/errors.ts`.
- **Error Handling**: Adjusted error code mappings in `errorHandler.ts` (e.g., `PERMISSION_DENIED` from `FORBIDDEN`, `INTERNAL_ERROR` from `UNKNOWN_ERROR`) and updated specific error codes used in `dateParser.ts` (e.g. `VALIDATION_ERROR` from `PARSING_ERROR`).
- **Tool Response Creation**: Standardized MCP tool response creation by replacing custom `createFormattedResponse` utilities and `ResponseFormatter` interfaces with a local interface and the centralized `createToolResponse` function from `types/mcp.js`. This enhances consistency in how tools format and return their results.
- **Type Definitions**: Refactored `ToolContext` in `src/types/tool.ts` to be an alias for `OperationContext` and updated tool registration logic to reflect changes in middleware and permission handling.
- **Configuration Loading**: Improved project root detection and backup directory handling with enhanced security checks.
- **Developer Guidelines**: Significantly updated the developer cheat sheet (`.clinerules`) with comprehensive guidelines on request context, logging, error handling, sanitization, and an expanded repository structure overview.
- **`atlas_deep_research` Tool**: (Moved from Unreleased) Introduced a new MCP tool (`atlas_deep_research`) designed to initiate and structure deep research processes within the Atlas knowledge base. This tool allows users to define a primary research topic, goal, and scope, and break it down into manageable sub-topics with initial search queries. It creates a hierarchical knowledge graph in Neo4j, consisting of a root 'research-plan' node and child 'research-subtopic' nodes, facilitating organized research efforts. The tool supports client-provided IDs, domain categorization, initial tagging, and flexible response formatting (formatted string or raw JSON). Core logic resides in `src/mcp/tools/atlas_deep_research/deepResearch.ts`, with input validation using Zod schemas in `types.ts` and response formatting handled in `responseFormat.ts`. An example demonstrating its usage and output can be found in the [`examples/deep-research-example/`](./examples/deep-research-example/) directory.
- **Package Metadata**: Updated `package.json` with `files` directive, repository details (`repository`, `bugs`, `homepage`), an updated project description, and expanded keywords for better discoverability.
- **Knowledge Service Response**: The `KnowledgeService.addKnowledge` method now includes the `domain` name and `citations` in its response, providing more comprehensive data.
- **Task Service Response**: The `TaskService.createTask` method now includes `assignedToUserId` in its response, directly providing the assignee's identifier.
### Fixed
- **Documentation**: Updated the `docs/tree.md` to reflect the latest directory structure timestamp and changes from recent refactoring.
- **Task Creation Response**: Corrected the `atlas_task_create` tool to ensure the `assignedTo` field in the JSON response accurately reflects the `assignedToUserId` from the service layer, aligning output with input schema expectations.
```
--------------------------------------------------------------------------------
/src/mcp/transports/httpTransport.ts:
--------------------------------------------------------------------------------
```typescript
/**
* @fileoverview Handles the setup and management of the Streamable HTTP MCP transport.
* Implements the MCP Specification 2025-03-26 for Streamable HTTP.
* This includes creating an Express server, configuring middleware (CORS, Authentication),
* defining request routing for the single MCP endpoint (POST/GET/DELETE),
* managing server-side sessions, handling Server-Sent Events (SSE) for streaming,
* and binding to a network port with retry logic for port conflicts.
*
* Specification Reference:
* https://github.com/modelcontextprotocol/modelcontextprotocol/blob/main/docs/specification/2025-03-26/basic/transports.mdx#streamable-http
* @module src/mcp-server/transports/httpTransport
*/
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import { isInitializeRequest } from "@modelcontextprotocol/sdk/types.js";
import express, { NextFunction, Request, Response } from "express";
import http from "http";
import { randomUUID } from "node:crypto";
import { config } from "../../config/index.js";
import { BaseErrorCode, McpError } from "../../types/errors.js"; // For McpError type check
import {
logger,
rateLimiter,
RequestContext,
requestContextService,
} from "../../utils/index.js";
import { mcpAuthMiddleware } from "./authentication/authMiddleware.js";
/**
 * The port number for the HTTP transport, configured via `MCP_HTTP_PORT` environment variable.
 * Defaults to 3010 if not specified (default is managed by the config module).
 * @constant {number} HTTP_PORT
 * @private
 */
const HTTP_PORT = config.mcpHttpPort;
/**
 * The host address for the HTTP transport, configured via `MCP_HTTP_HOST` environment variable.
 * Defaults to '127.0.0.1' if not specified (default is managed by the config module).
 * MCP Spec Security Note: Recommends binding to localhost for local servers to minimize exposure.
 * @constant {string} HTTP_HOST
 * @private
 */
const HTTP_HOST = config.mcpHttpHost;
/**
 * The single HTTP endpoint path for all MCP communication, as required by the MCP specification.
 * This endpoint supports POST, GET, DELETE, and OPTIONS methods.
 * @constant {string} MCP_ENDPOINT_PATH
 * @private
 */
const MCP_ENDPOINT_PATH = "/mcp";
/**
 * Maximum number of attempts to find an available port if the initial `HTTP_PORT` is in use.
 * The server will try ports sequentially: `HTTP_PORT`, `HTTP_PORT + 1`, ..., up to `MAX_PORT_RETRIES`.
 * @constant {number} MAX_PORT_RETRIES
 * @private
 */
const MAX_PORT_RETRIES = 15;
/**
 * Stores active `StreamableHTTPServerTransport` instances from the SDK, keyed by their session ID.
 * This is essential for routing subsequent HTTP requests (GET, DELETE, non-initialize POST)
 * to the correct stateful session transport instance.
 * Entries are added when a session is initialized and removed when the session's transport
 * closes (via its `onclose` handler) or when a new InitializeRequest reuses an existing ID.
 * @type {Record<string, StreamableHTTPServerTransport>}
 * @private
 */
const httpTransports: Record<string, StreamableHTTPServerTransport> = {};
/**
* Checks if an incoming HTTP request's `Origin` header is permissible based on configuration.
* MCP Spec Security: Servers MUST validate the `Origin` header for cross-origin requests.
* This function checks the request's origin against the `config.mcpAllowedOrigins` list.
* If the server is bound to localhost, requests from localhost or with no/null origin are also permitted.
* Sets appropriate CORS headers (`Access-Control-Allow-Origin`, etc.) if the origin is allowed.
*
* @param req - The Express request object.
* @param res - The Express response object.
* @returns True if the origin is allowed, false otherwise.
* @private
*/
/**
 * Validates the `Origin` header of an incoming HTTP request against server configuration.
 * MCP Spec Security: Servers MUST validate the `Origin` header for cross-origin requests.
 *
 * An origin is accepted when it appears in `config.mcpAllowedOrigins`, or when the server
 * is bound to a loopback interface and the request carries no origin (or a literal "null"
 * origin, e.g. from file:// URLs). On acceptance, the appropriate CORS response headers
 * are written to `res` as a side effect.
 *
 * @param req - The Express request object.
 * @param res - The Express response object.
 * @returns True if the origin is allowed, false otherwise.
 * @private
 */
function isOriginAllowed(req: Request, res: Response): boolean {
  const origin = req.headers.origin;
  const boundHost = config.mcpHttpHost;
  // The server counts as "localhost-bound" when its configured bind address is a loopback interface.
  const isLocalhostBinding = ["127.0.0.1", "::1", "localhost"].includes(
    boundHost,
  );
  const allowedOrigins = config.mcpAllowedOrigins || [];
  const context = requestContextService.createRequestContext({
    operation: "isOriginAllowed",
    requestOrigin: origin, // Use a more descriptive key for the request's origin
    serverBindingHost: boundHost, // Log the server's binding host for context
    isLocalhostBinding,
    configuredAllowedOrigins: allowedOrigins, // Use a more descriptive key
  });
  logger.debug("Checking origin allowance", context);

  // Writes the method/header allowances shared by every permitted request.
  const applySharedCorsHeaders = (): void => {
    res.setHeader("Access-Control-Allow-Methods", "GET, POST, DELETE, OPTIONS");
    res.setHeader(
      "Access-Control-Allow-Headers",
      "Content-Type, Mcp-Session-Id, Last-Event-ID, Authorization",
    );
  };

  const isWhitelisted = !!origin && allowedOrigins.includes(origin);
  const isPermittedLocalhost =
    isLocalhostBinding && (!origin || origin === "null");
  const allowed = isWhitelisted || isPermittedLocalhost;

  if (allowed) {
    if (origin) {
      if (origin === "null") {
        // For "null" origin (e.g., file:// URLs on localhost), allow access but explicitly disallow credentials.
        res.setHeader("Access-Control-Allow-Origin", "null");
        res.setHeader("Access-Control-Allow-Credentials", "false"); // Explicitly false for "null" origin
        logger.debug(
          `Origin is "null" (and server is localhost-bound). Allowing request without credentials. ACAO: "null", ACAC: "false"`,
          context,
        );
      } else {
        // For any other allowed, non-null origin (i.e., whitelisted), reflect it and allow credentials.
        res.setHeader("Access-Control-Allow-Origin", origin);
        res.setHeader("Access-Control-Allow-Credentials", "true");
        logger.debug(
          `Origin '${origin}' is whitelisted. Allowing with credentials. ACAO: ${origin}, ACAC: "true"`,
          context,
        );
      }
      applySharedCorsHeaders();
    } else if (isLocalhostBinding) {
      // No origin header, but the server is localhost-bound (e.g., same-origin, curl).
      // Credentials may be allowed; ACAO is not strictly needed for same-origin or non-browser clients.
      logger.debug(
        `No origin header, but request allowed due to localhost binding. Setting Access-Control-Allow-Credentials to true.`,
        context,
      );
      applySharedCorsHeaders();
      res.setHeader("Access-Control-Allow-Credentials", "true");
    }
  } else if (origin) {
    // Origin was present but not allowed by any rule.
    logger.warning(`Origin denied: ${origin}`, context);
  }
  // If !allowed and !origin, no specific CORS headers needed, request proceeds to be potentially denied by other logic or auth.
  logger.debug(`Origin check result: ${allowed}`, { ...context, allowed });
  return allowed;
}
/**
* Proactively checks if a specific network port is already in use.
* @param port - The port number to check.
* @param host - The host address to check the port on.
* @param parentContext - Logging context from the caller.
* @returns A promise that resolves to `true` if the port is in use, or `false` otherwise.
* @private
*/
/**
 * Proactively checks if a specific network port is already in use by attempting
 * a short-lived bind with a temporary HTTP server.
 * @param port - The port number to check.
 * @param host - The host address to check the port on.
 * @param parentContext - Logging context from the caller.
 * @returns A promise that resolves to `true` if the port is in use, or `false` otherwise.
 * @private
 */
async function isPortInUse(
  port: number,
  host: string,
  parentContext: RequestContext,
): Promise<boolean> {
  const checkContext = requestContextService.createRequestContext({
    ...parentContext,
    operation: "isPortInUse",
    port,
    host,
  });
  logger.debug(`Proactively checking port usability...`, checkContext);
  return new Promise((resolve) => {
    const probe = http.createServer();
    probe.once("error", (err: NodeJS.ErrnoException) => {
      if (err.code !== "EADDRINUSE") {
        // Any other bind error is treated as "not in use"; the real listen attempt will surface it.
        logger.debug(
          `Proactive check: Non-EADDRINUSE error encountered: ${err.message}`,
          { ...checkContext, errorCode: err.code },
        );
        resolve(false);
        return;
      }
      logger.debug(
        `Proactive check: Port confirmed in use (EADDRINUSE).`,
        checkContext,
      );
      resolve(true);
    });
    probe.once("listening", () => {
      // Bind succeeded, so the port is free; release it before resolving.
      logger.debug(`Proactive check: Port is available.`, checkContext);
      probe.close(() => resolve(false));
    });
    probe.listen(port, host);
  });
}
/**
* Attempts to start the HTTP server, retrying on incrementing ports if `EADDRINUSE` occurs.
*
* @param serverInstance - The Node.js HTTP server instance.
* @param initialPort - The initial port number to try.
* @param host - The host address to bind to.
* @param maxRetries - Maximum number of additional ports to attempt.
* @param parentContext - Logging context from the caller.
* @returns A promise that resolves with the port number the server successfully bound to.
* @throws {Error} If binding fails after all retries or for a non-EADDRINUSE error.
* @private
*/
/**
 * Attempts to start the HTTP server, retrying on incrementing ports if `EADDRINUSE` occurs.
 *
 * Implemented as a plain `async` function rather than a `new Promise(async ...)` executor:
 * an async promise executor swallows synchronous throws and can leave the returned
 * promise permanently pending, so the retry loop now uses normal `return`/`throw`.
 *
 * @param serverInstance - The Node.js HTTP server instance.
 * @param initialPort - The initial port number to try.
 * @param host - The host address to bind to.
 * @param maxRetries - Maximum number of additional ports to attempt.
 * @param parentContext - Logging context from the caller.
 * @returns A promise that resolves with the port number the server successfully bound to.
 * @throws {Error} If binding fails after all retries or for a non-EADDRINUSE error.
 * @private
 */
async function startHttpServerWithRetry(
  serverInstance: http.Server,
  initialPort: number,
  host: string,
  maxRetries: number,
  parentContext: RequestContext,
): Promise<number> {
  const startContext = requestContextService.createRequestContext({
    ...parentContext,
    operation: "startHttpServerWithRetry",
    initialPort,
    host,
    maxRetries,
  });
  logger.debug(`Attempting to start HTTP server...`, startContext);
  let lastError: Error | null = null;
  for (let i = 0; i <= maxRetries; i++) {
    const currentPort = initialPort + i;
    const attemptContext = requestContextService.createRequestContext({
      ...startContext,
      port: currentPort,
      attempt: i + 1,
      maxAttempts: maxRetries + 1,
    });
    logger.debug(
      `Attempting port ${currentPort} (${attemptContext.attempt}/${attemptContext.maxAttempts})`,
      attemptContext,
    );
    // Cheap pre-flight check avoids a failed listen() on a port we already know is taken.
    if (await isPortInUse(currentPort, host, attemptContext)) {
      logger.warning(
        `Proactive check detected port ${currentPort} is in use, retrying...`,
        attemptContext,
      );
      lastError = new Error(
        `EADDRINUSE: Port ${currentPort} detected as in use by proactive check.`,
      );
      await new Promise((res) => setTimeout(res, 100)); // Brief back-off before the next port.
      continue;
    }
    try {
      // Wrap the callback-style listen() in a promise so bind errors reject here.
      await new Promise<void>((listenResolve, listenReject) => {
        serverInstance
          .listen(currentPort, host, () => {
            const serverAddress = `http://${host}:${currentPort}${MCP_ENDPOINT_PATH}`;
            logger.info(
              `HTTP transport successfully listening on host ${host} at ${serverAddress}`,
              { ...attemptContext, address: serverAddress },
            );
            listenResolve();
          })
          .on("error", (err: NodeJS.ErrnoException) => {
            listenReject(err);
          });
      });
      return currentPort;
    } catch (err: unknown) {
      const errnoErr = err as NodeJS.ErrnoException;
      lastError = errnoErr;
      logger.debug(
        `Listen error on port ${currentPort}: Code=${errnoErr.code}, Message=${errnoErr.message}`,
        {
          ...attemptContext,
          errorCode: errnoErr.code,
          errorMessage: errnoErr.message,
        },
      );
      if (errnoErr.code === "EADDRINUSE") {
        logger.warning(
          `Port ${currentPort} already in use (EADDRINUSE), retrying...`,
          attemptContext,
        );
        await new Promise((res) => setTimeout(res, 100));
      } else {
        // Non-recoverable bind failure (e.g., EACCES): abort immediately rather than retrying.
        logger.error(
          `Failed to bind to port ${currentPort} due to non-EADDRINUSE error: ${errnoErr.message}`,
          { ...attemptContext, error: errnoErr.message },
        );
        throw errnoErr;
      }
    }
  }
  logger.error(
    `Failed to bind to any port after ${maxRetries + 1} attempts. Last error: ${lastError?.message}`,
    { ...startContext, error: lastError?.message },
  );
  throw (
    lastError || new Error("Failed to bind to any port after multiple retries.")
  );
}
/**
* Sets up and starts the Streamable HTTP transport layer for the MCP server.
*
* @param createServerInstanceFn - An asynchronous factory function that returns a new `McpServer` instance.
* @param parentContext - Logging context from the main server startup process.
* @returns A promise that resolves with the Node.js `http.Server` instance when the HTTP server is successfully listening.
* @throws {Error} If the server fails to start after all port retries.
*/
export async function startHttpTransport(
createServerInstanceFn: () => Promise<McpServer>,
parentContext: RequestContext,
): Promise<http.Server> {
const app = express();
const transportContext = requestContextService.createRequestContext({
...parentContext,
transportType: "HTTP",
component: "HttpTransportSetup",
});
logger.debug(
"Setting up Express app for HTTP transport...",
transportContext,
);
app.use(express.json());
// Rate Limiting Middleware
// Apply this before more expensive operations like auth or request processing.
const httpRateLimitMiddleware = (
req: Request,
res: Response,
next: NextFunction,
) => {
// Determine a reliable key for rate limiting. Prioritize req.ip,
// then fall back to req.socket.remoteAddress, and finally to a default string.
const rateLimitKey =
req.ip || req.socket.remoteAddress || "unknown_ip_for_rate_limit";
const context = requestContextService.createRequestContext({
operation: "httpRateLimitCheck",
ipAddress: rateLimitKey, // Log the actual key being used
method: req.method,
path: req.path,
});
try {
rateLimiter.check(rateLimitKey, context); // Use the guaranteed string key
logger.debug("Rate limit check passed.", context);
next();
} catch (error) {
if (
error instanceof McpError &&
error.code === BaseErrorCode.RATE_LIMITED
) {
logger.warning(`Rate limit exceeded for IP: ${rateLimitKey}`, {
// Use rateLimitKey here
...context,
errorMessage: error.message,
details: error.details,
});
res.status(429).json({
jsonrpc: "2.0",
error: { code: -32000, message: "Too Many Requests" }, // Generic JSON-RPC error for rate limit
id: (req.body as any)?.id || null,
});
} else {
// For other errors, pass them to the default error handler
logger.error("Unexpected error in rate limit middleware", {
...context,
error: error instanceof Error ? error.message : String(error),
});
next(error);
}
}
};
// Apply rate limiter to the MCP endpoint for all methods
app.use(MCP_ENDPOINT_PATH, httpRateLimitMiddleware);
app.options(MCP_ENDPOINT_PATH, (req, res) => {
const optionsContext = requestContextService.createRequestContext({
...transportContext,
operation: "handleOptions",
origin: req.headers.origin,
method: req.method,
path: req.path,
});
logger.debug(
`Received OPTIONS request for ${MCP_ENDPOINT_PATH}`,
optionsContext,
);
if (isOriginAllowed(req, res)) {
logger.debug(
"OPTIONS request origin allowed, sending 204.",
optionsContext,
);
res.sendStatus(204);
} else {
logger.debug(
"OPTIONS request origin denied, sending 403.",
optionsContext,
);
res.status(403).send("Forbidden: Invalid Origin");
}
});
app.use((req: Request, res: Response, next: NextFunction) => {
const securityContext = requestContextService.createRequestContext({
...transportContext,
operation: "securityMiddleware",
path: req.path,
method: req.method,
origin: req.headers.origin,
});
logger.debug(`Applying security middleware...`, securityContext);
if (!isOriginAllowed(req, res)) {
logger.debug("Origin check failed, sending 403.", securityContext);
res.status(403).send("Forbidden: Invalid Origin");
return;
}
res.setHeader("X-Content-Type-Options", "nosniff");
res.setHeader("Referrer-Policy", "strict-origin-when-cross-origin");
res.setHeader(
"Content-Security-Policy",
"default-src 'self'; script-src 'self'; object-src 'none'; style-src 'self'; img-src 'self'; media-src 'self'; frame-src 'none'; font-src 'self'; connect-src 'self'",
);
logger.debug("Security middleware passed.", securityContext);
next();
});
app.use(mcpAuthMiddleware);
app.post(MCP_ENDPOINT_PATH, async (req, res) => {
const basePostContext = requestContextService.createRequestContext({
...transportContext,
operation: "handlePost",
method: "POST",
path: req.path,
origin: req.headers.origin,
});
logger.debug(`Received POST request on ${MCP_ENDPOINT_PATH}`, {
...basePostContext,
headers: req.headers,
bodyPreview: JSON.stringify(req.body).substring(0, 100),
});
const sessionId = req.headers["mcp-session-id"] as string | undefined;
logger.debug(`Extracted session ID: ${sessionId}`, {
...basePostContext,
sessionId,
});
let transport = sessionId ? httpTransports[sessionId] : undefined;
logger.debug(`Found existing transport for session ID: ${!!transport}`, {
...basePostContext,
sessionId,
});
const isInitReq = isInitializeRequest(req.body);
logger.debug(`Is InitializeRequest: ${isInitReq}`, {
...basePostContext,
sessionId,
});
const requestId = (req.body as any)?.id || null;
try {
if (isInitReq) {
if (transport) {
logger.warning(
"Received InitializeRequest on an existing session ID. Closing old session and creating new.",
{ ...basePostContext, sessionId },
);
await transport.close();
delete httpTransports[sessionId!];
}
logger.info("Handling Initialize Request: Creating new session...", {
...basePostContext,
sessionId,
});
transport = new StreamableHTTPServerTransport({
sessionIdGenerator: () => {
const newId = randomUUID();
logger.debug(`Generated new session ID: ${newId}`, basePostContext);
return newId;
},
onsessioninitialized: (newId) => {
logger.debug(
`Session initialized callback triggered for ID: ${newId}`,
{ ...basePostContext, newSessionId: newId },
);
httpTransports[newId] = transport!;
logger.info(`HTTP Session created: ${newId}`, {
...basePostContext,
newSessionId: newId,
});
},
});
transport.onclose = () => {
const closedSessionId = transport!.sessionId;
if (closedSessionId) {
logger.debug(
`onclose handler triggered for session ID: ${closedSessionId}`,
{ ...basePostContext, closedSessionId },
);
delete httpTransports[closedSessionId];
logger.info(`HTTP Session closed: ${closedSessionId}`, {
...basePostContext,
closedSessionId,
});
} else {
logger.debug(
"onclose handler triggered for transport without session ID (likely init failure).",
basePostContext,
);
}
};
logger.debug(
"Creating McpServer instance for new session...",
basePostContext,
);
const server = await createServerInstanceFn();
logger.debug(
"Connecting McpServer to new transport...",
basePostContext,
);
await server.connect(transport);
logger.debug("McpServer connected to transport.", basePostContext);
} else if (!transport) {
logger.warning(
"Invalid or missing session ID for non-initialize POST request.",
{ ...basePostContext, sessionId },
);
res.status(404).json({
jsonrpc: "2.0",
error: { code: -32004, message: "Invalid or expired session ID" },
id: requestId,
});
return;
}
const currentSessionId = transport.sessionId;
logger.debug(
`Processing POST request content for session ${currentSessionId}...`,
{ ...basePostContext, sessionId: currentSessionId, isInitReq },
);
await transport.handleRequest(req, res, req.body);
logger.debug(
`Finished processing POST request content for session ${currentSessionId}.`,
{ ...basePostContext, sessionId: currentSessionId },
);
} catch (err) {
const errorSessionId = transport?.sessionId || sessionId;
logger.error("Error handling POST request", {
...basePostContext,
sessionId: errorSessionId,
isInitReq,
error: err instanceof Error ? err.message : String(err),
stack: err instanceof Error ? err.stack : undefined,
});
if (!res.headersSent) {
res.status(500).json({
jsonrpc: "2.0",
error: {
code: -32603,
message: "Internal server error during POST handling",
},
id: requestId,
});
}
if (isInitReq && transport && !transport.sessionId) {
logger.debug("Cleaning up transport after initialization failure.", {
...basePostContext,
sessionId: errorSessionId,
});
await transport.close().catch((closeErr) =>
logger.error("Error closing transport after init failure", {
...basePostContext,
sessionId: errorSessionId,
closeError: closeErr,
}),
);
}
}
});
const handleSessionReq = async (req: Request, res: Response) => {
const method = req.method;
const baseSessionReqContext = requestContextService.createRequestContext({
...transportContext,
operation: `handle${method}`,
method,
path: req.path,
origin: req.headers.origin,
});
logger.debug(`Received ${method} request on ${MCP_ENDPOINT_PATH}`, {
...baseSessionReqContext,
headers: req.headers,
});
const sessionId = req.headers["mcp-session-id"] as string | undefined;
logger.debug(`Extracted session ID: ${sessionId}`, {
...baseSessionReqContext,
sessionId,
});
const transport = sessionId ? httpTransports[sessionId] : undefined;
logger.debug(`Found existing transport for session ID: ${!!transport}`, {
...baseSessionReqContext,
sessionId,
});
if (!transport) {
logger.warning(`Session not found for ${method} request`, {
...baseSessionReqContext,
sessionId,
});
res.status(404).json({
jsonrpc: "2.0",
error: { code: -32004, message: "Session not found or expired" },
id: null, // Or a relevant request identifier if available from context
});
return;
}
try {
logger.debug(
`Delegating ${method} request to transport for session ${sessionId}...`,
{ ...baseSessionReqContext, sessionId },
);
await transport.handleRequest(req, res);
logger.info(
`Successfully handled ${method} request for session ${sessionId}`,
{ ...baseSessionReqContext, sessionId },
);
} catch (err) {
logger.error(
`Error handling ${method} request for session ${sessionId}`,
{
...baseSessionReqContext,
sessionId,
error: err instanceof Error ? err.message : String(err),
stack: err instanceof Error ? err.stack : undefined,
},
);
if (!res.headersSent) {
res.status(500).json({
jsonrpc: "2.0",
error: { code: -32603, message: "Internal Server Error" },
id: null, // Or a relevant request identifier
});
}
}
};
app.get(MCP_ENDPOINT_PATH, handleSessionReq);
app.delete(MCP_ENDPOINT_PATH, handleSessionReq);
logger.debug("Creating HTTP server instance...", transportContext);
const serverInstance = http.createServer(app);
try {
logger.debug(
"Attempting to start HTTP server with retry logic...",
transportContext,
);
const actualPort = await startHttpServerWithRetry(
serverInstance,
config.mcpHttpPort,
config.mcpHttpHost,
MAX_PORT_RETRIES,
transportContext,
);
let serverAddressLog = `http://${config.mcpHttpHost}:${actualPort}${MCP_ENDPOINT_PATH}`;
let productionNote = "";
if (config.environment === "production") {
// The server itself runs HTTP, but it's expected to be behind an HTTPS proxy in production.
// The log reflects the effective public-facing URL.
serverAddressLog = `https://${config.mcpHttpHost}:${actualPort}${MCP_ENDPOINT_PATH}`;
productionNote = ` (via HTTPS, ensure reverse proxy is configured)`;
}
if (process.stdout.isTTY) {
console.log(
`\n🚀 MCP Server running in HTTP mode at: ${serverAddressLog}${productionNote}\n (MCP Spec: 2025-03-26 Streamable HTTP Transport)\n`,
);
}
return serverInstance; // Return the created server instance
} catch (err) {
logger.fatal("HTTP server failed to start after multiple port retries.", {
...transportContext,
error: err instanceof Error ? err.message : String(err),
});
throw err; // Re-throw the error to be caught by the caller
}
}
```
--------------------------------------------------------------------------------
/src/services/neo4j/projectService.ts:
--------------------------------------------------------------------------------
```typescript
import { logger, requestContextService } from "../../utils/index.js"; // Updated import path
import { neo4jDriver } from "./driver.js";
import { buildListQuery, generateId } from "./helpers.js"; // Import buildListQuery
import {
Neo4jProject,
NodeLabels,
PaginatedResult,
ProjectDependencyType, // Import the new enum
ProjectFilterOptions,
RelationshipTypes,
} from "./types.js";
import { Neo4jUtils } from "./utils.js";
/**
 * Service for managing Project entities in Neo4j.
 *
 * Storage note: Neo4j properties must be primitive types or arrays of
 * primitives, so the `urls` array (objects) is serialized to a JSON string
 * on write and deserialized on every read.
 */
export class ProjectService {
  /**
   * Property keys that callers may update via {@link updateProject}.
   * The SET clause is built by interpolating keys into Cypher, so keys are
   * restricted to this whitelist to prevent Cypher injection through
   * unexpected object keys supplied at runtime.
   */
  private static readonly UPDATABLE_KEYS: ReadonlySet<string> = new Set([
    "name",
    "description",
    "status",
    "urls",
    "completionRequirements",
    "outputFormat",
    "taskType",
  ]);

  /**
   * Coerce a value returned by the driver (a Neo4j Integer or a plain JS
   * number) to a JS number. Centralizes the handling previously done only
   * in areAllDependenciesCompleted so counts are safe to use in arithmetic.
   */
  private static toJsNumber(value: any): number {
    if (value && typeof value.toNumber === "function") {
      return value.toNumber();
    }
    return typeof value === "number" ? value : Number(value ?? 0);
  }

  /**
   * Build a Neo4jProject from a record whose fields were aliased to plain
   * names (id, name, ...). Deserializes `urls` from its JSON-string form.
   * Shared by create/get/update/list to keep the mapping consistent.
   */
  private static recordToProject(record: {
    get: (key: string) => any;
  }): Neo4jProject {
    return {
      id: record.get("id"),
      name: record.get("name"),
      description: record.get("description"),
      status: record.get("status"),
      urls: JSON.parse(record.get("urls") || "[]"), // Deserialize from JSON string
      completionRequirements: record.get("completionRequirements"),
      outputFormat: record.get("outputFormat"),
      taskType: record.get("taskType"),
      createdAt: record.get("createdAt"),
      updatedAt: record.get("updatedAt"),
    };
  }

  /**
   * Create a new project
   * @param project Project data (id optional; generated when omitted)
   * @returns The created project
   * @throws If the CREATE query returns no record
   */
  static async createProject(
    project: Omit<Neo4jProject, "id" | "createdAt" | "updatedAt"> & {
      id?: string;
    },
  ): Promise<Neo4jProject> {
    const session = await neo4jDriver.getSession();
    try {
      const projectId = project.id || `proj_${generateId()}`;
      const now = Neo4jUtils.getCurrentTimestamp();
      const query = `
        CREATE (p:${NodeLabels.Project} {
          id: $id,
          name: $name,
          description: $description,
          status: $status,
          urls: $urls,
          completionRequirements: $completionRequirements,
          outputFormat: $outputFormat,
          taskType: $taskType,
          createdAt: $createdAt,
          updatedAt: $updatedAt
        })
        RETURN p.id as id,
               p.name as name,
               p.description as description,
               p.status as status,
               p.urls as urls,
               p.completionRequirements as completionRequirements,
               p.outputFormat as outputFormat,
               p.taskType as taskType,
               p.createdAt as createdAt,
               p.updatedAt as updatedAt
      `;
      const params = {
        id: projectId,
        name: project.name,
        description: project.description,
        status: project.status,
        urls: JSON.stringify(project.urls || []), // Serialize to JSON string
        completionRequirements: project.completionRequirements,
        outputFormat: project.outputFormat,
        taskType: project.taskType,
        createdAt: now,
        updatedAt: now,
      };
      const record = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, params);
        return result.records.length > 0 ? result.records[0] : null;
      });
      if (!record) {
        throw new Error("Failed to create project or retrieve its properties");
      }
      const createdProjectData = ProjectService.recordToProject(record);
      const reqContext_create = requestContextService.createRequestContext({
        operation: "createProject",
        projectId: createdProjectData.id,
      });
      logger.info("Project created successfully", reqContext_create);
      return createdProjectData;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "createProject.error",
        projectInput: project,
      });
      logger.error("Error creating project", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Get a project by ID
   * @param id Project ID
   * @returns The project or null if not found
   */
  static async getProjectById(id: string): Promise<Neo4jProject | null> {
    const session = await neo4jDriver.getSession();
    try {
      const query = `
        MATCH (p:${NodeLabels.Project} {id: $id})
        RETURN p.id as id,
               p.name as name,
               p.description as description,
               p.status as status,
               p.urls as urls,
               p.completionRequirements as completionRequirements,
               p.outputFormat as outputFormat,
               p.taskType as taskType,
               p.createdAt as createdAt,
               p.updatedAt as updatedAt
      `;
      const records = await session.executeRead(async (tx) => {
        const result = await tx.run(query, { id });
        return result.records;
      });
      if (records.length === 0) {
        return null;
      }
      return ProjectService.recordToProject(records[0]);
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "getProjectById.error",
        projectId: id,
      });
      logger.error("Error getting project by ID", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Check if all dependencies of a project are completed
   * @param projectId Project ID to check dependencies for
   * @returns True if all dependencies are completed, false otherwise
   */
  static async areAllDependenciesCompleted(
    projectId: string,
  ): Promise<boolean> {
    const session = await neo4jDriver.getSession();
    try {
      // Count direct dependencies that are not yet 'completed'.
      const query = `
        MATCH (p:${NodeLabels.Project} {id: $projectId})-[:${RelationshipTypes.DEPENDS_ON}]->(dep:${NodeLabels.Project})
        WHERE dep.status <> 'completed'
        RETURN count(dep) AS incompleteCount
      `;
      const incompleteCount = await session.executeRead(async (tx) => {
        const result = await tx.run(query, { projectId });
        const record = result.records[0];
        // count() may come back as a Neo4j Integer or a plain number.
        return ProjectService.toJsNumber(
          record ? record.get("incompleteCount") : null,
        );
      });
      return incompleteCount === 0;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "areAllDependenciesCompleted.error",
        projectId,
      });
      logger.error(
        "Error checking project dependencies completion status",
        error as Error,
        { ...errorContext, detail: errorMessage },
      );
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Update a project
   * @param id Project ID
   * @param updates Project updates (only whitelisted properties are applied)
   * @returns The updated project
   * @throws If the project does not exist, an unknown property key is
   *         supplied, or dependencies block the requested status change
   */
  static async updateProject(
    id: string,
    updates: Partial<Omit<Neo4jProject, "id" | "createdAt" | "updatedAt">>,
  ): Promise<Neo4jProject> {
    const session = await neo4jDriver.getSession();
    try {
      const exists = await Neo4jUtils.nodeExists(NodeLabels.Project, "id", id);
      if (!exists) {
        throw new Error(`Project with ID ${id} not found`);
      }
      // A project may only move forward when every dependency is completed.
      if (updates.status === "in-progress" || updates.status === "completed") {
        const depsCompleted = await this.areAllDependenciesCompleted(id);
        if (!depsCompleted) {
          throw new Error(
            `Cannot mark project as ${updates.status} because not all dependencies are completed`,
          );
        }
      }
      const updateParams: Record<string, any> = {
        id,
        updatedAt: Neo4jUtils.getCurrentTimestamp(),
      };
      const setClauses = ["p.updatedAt = $updatedAt"];
      for (const [key, value] of Object.entries(updates)) {
        if (value === undefined) continue;
        // `key` is interpolated into the SET clause below; reject anything
        // outside the known property set to prevent Cypher injection.
        if (!ProjectService.UPDATABLE_KEYS.has(key)) {
          throw new Error(`Invalid project property in update: ${key}`);
        }
        // Serialize urls array to JSON string if it's the key being updated
        updateParams[key] = key === "urls" ? JSON.stringify(value || []) : value;
        setClauses.push(`p.${key} = $${key}`);
      }
      const query = `
        MATCH (p:${NodeLabels.Project} {id: $id})
        SET ${setClauses.join(", ")}
        RETURN p.id as id,
               p.name as name,
               p.description as description,
               p.status as status,
               p.urls as urls,
               p.completionRequirements as completionRequirements,
               p.outputFormat as outputFormat,
               p.taskType as taskType,
               p.createdAt as createdAt,
               p.updatedAt as updatedAt
      `;
      const record = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, updateParams);
        return result.records.length > 0 ? result.records[0] : null;
      });
      if (!record) {
        throw new Error("Failed to update project or retrieve its properties");
      }
      const updatedProjectData = ProjectService.recordToProject(record);
      const reqContext_update = requestContextService.createRequestContext({
        operation: "updateProject",
        projectId: id,
      });
      logger.info("Project updated successfully", reqContext_update);
      return updatedProjectData;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "updateProject.error",
        projectId: id,
        updatesApplied: updates,
      });
      logger.error("Error updating project", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Delete a project node and all of its relationships (DETACH DELETE).
   *
   * NOTE(review): this query deletes only the project node itself; attached
   * Task/Knowledge nodes are NOT removed here — confirm whether cascade
   * deletion is handled elsewhere (e.g. a database-clean tool) if orphaned
   * nodes are a concern.
   * @param id Project ID
   * @returns True if deleted, false if not found
   */
  static async deleteProject(id: string): Promise<boolean> {
    const session = await neo4jDriver.getSession();
    try {
      const exists = await Neo4jUtils.nodeExists(NodeLabels.Project, "id", id);
      if (!exists) {
        return false;
      }
      const query = `
        MATCH (p:${NodeLabels.Project} {id: $id})
        DETACH DELETE p
      `;
      await session.executeWrite(async (tx) => {
        await tx.run(query, { id });
      });
      const reqContext_delete = requestContextService.createRequestContext({
        operation: "deleteProject",
        projectId: id,
      });
      logger.info("Project deleted successfully", reqContext_delete);
      return true;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "deleteProject.error",
        projectId: id,
      });
      logger.error("Error deleting project", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Get all projects with optional filtering and pagination
   * @param options Filter and pagination options
   * @returns Paginated list of projects
   */
  static async getProjects(
    options: ProjectFilterOptions = {},
  ): Promise<PaginatedResult<Neo4jProject>> {
    const session = await neo4jDriver.getSession();
    try {
      const reqContext_list = requestContextService.createRequestContext({
        operation: "getProjects",
        filterOptions: options,
      });
      const nodeAlias = "p";
      // Properties to project out of the data query.
      const returnProperties = [
        `${nodeAlias}.id as id`,
        `${nodeAlias}.name as name`,
        `${nodeAlias}.description as description`,
        `${nodeAlias}.status as status`,
        `${nodeAlias}.urls as urls`,
        `${nodeAlias}.completionRequirements as completionRequirements`,
        `${nodeAlias}.outputFormat as outputFormat`,
        `${nodeAlias}.taskType as taskType`,
        `${nodeAlias}.createdAt as createdAt`,
        `${nodeAlias}.updatedAt as updatedAt`,
      ];
      // buildListQuery does not support a free-text search filter.
      if (options.searchTerm) {
        logger.warning(
          "searchTerm filter is not currently supported in getProjects when using buildListQuery helper.",
          reqContext_list,
        );
      }
      const { countQuery, dataQuery, params } = buildListQuery(
        NodeLabels.Project,
        returnProperties,
        {
          // Filters (searchTerm intentionally omitted, see above)
          status: options.status,
          taskType: options.taskType,
        },
        {
          // Pagination: newest projects first by default
          sortBy: "createdAt",
          sortDirection: "desc",
          page: options.page,
          limit: options.limit,
        },
        nodeAlias,
      );
      // Count query: strip pagination params before running.
      const totalRaw = await session.executeRead(async (tx) => {
        const countParams = { ...params };
        delete countParams.skip;
        delete countParams.limit;
        logger.debug("Executing Project Count Query (using buildListQuery):", {
          ...reqContext_list,
          query: countQuery,
          params: countParams,
        });
        const result = await tx.run(countQuery, countParams);
        return result.records[0]?.get("total") ?? 0;
      });
      // Coerce possible Neo4j Integer so the arithmetic below is safe.
      const total = ProjectService.toJsNumber(totalRaw);
      logger.debug("Calculated total projects", { ...reqContext_list, total });
      const dataResult = await session.executeRead(async (tx) => {
        logger.debug("Executing Project Data Query (using buildListQuery):", {
          ...reqContext_list,
          query: dataQuery,
          params: params,
        });
        const result = await tx.run(dataQuery, params);
        return result.records;
      });
      const projects: Neo4jProject[] = dataResult.map((record) =>
        ProjectService.recordToProject(record),
      );
      const page = Math.max(options.page || 1, 1);
      const limit = Math.min(Math.max(options.limit || 20, 1), 100);
      const totalPages = Math.ceil(total / limit);
      return {
        data: projects,
        total,
        page,
        limit,
        totalPages,
      };
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "getProjects.error",
        filterOptions: options,
      });
      logger.error("Error getting projects", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Add a dependency relationship between projects
   * @param sourceProjectId ID of the dependent project (source)
   * @param targetProjectId ID of the dependency project (target)
   * @param type Type of dependency relationship
   * @param description Description of the dependency
   * @returns The IDs of the two projects and the relationship type
   * @throws If either project is missing, the dependency already exists, or
   *         adding it would create a cycle
   */
  static async addProjectDependency(
    sourceProjectId: string,
    targetProjectId: string,
    type: ProjectDependencyType,
    description: string,
  ): Promise<{
    id: string;
    sourceProjectId: string;
    targetProjectId: string;
    type: string;
    description: string;
  }> {
    const session = await neo4jDriver.getSession();
    try {
      const sourceExists = await Neo4jUtils.nodeExists(
        NodeLabels.Project,
        "id",
        sourceProjectId,
      );
      const targetExists = await Neo4jUtils.nodeExists(
        NodeLabels.Project,
        "id",
        targetProjectId,
      );
      if (!sourceExists)
        throw new Error(`Source project with ID ${sourceProjectId} not found`);
      if (!targetExists)
        throw new Error(`Target project with ID ${targetProjectId} not found`);
      const dependencyExists = await Neo4jUtils.relationshipExists(
        NodeLabels.Project,
        "id",
        sourceProjectId,
        NodeLabels.Project,
        "id",
        targetProjectId,
        RelationshipTypes.DEPENDS_ON,
      );
      if (dependencyExists) {
        throw new Error(
          `Dependency relationship already exists between projects ${sourceProjectId} and ${targetProjectId}`,
        );
      }
      // Reject the edge if the target can already reach the source via any
      // DEPENDS_ON path (would close a cycle).
      const circularDependencyQuery = `
        MATCH path = (target:${NodeLabels.Project} {id: $targetProjectId})-[:${RelationshipTypes.DEPENDS_ON}*]->(source:${NodeLabels.Project} {id: $sourceProjectId})
        RETURN count(path) > 0 AS hasCycle
      `;
      const cycleCheckResult = await session.executeRead(async (tx) => {
        const result = await tx.run(circularDependencyQuery, {
          sourceProjectId,
          targetProjectId,
        });
        return result.records[0]?.get("hasCycle");
      });
      if (cycleCheckResult) {
        throw new Error(
          "Adding this dependency would create a circular dependency chain",
        );
      }
      const dependencyId = `pdep_${generateId()}`;
      const query = `
        MATCH (source:${NodeLabels.Project} {id: $sourceProjectId}),
              (target:${NodeLabels.Project} {id: $targetProjectId})
        CREATE (source)-[r:${RelationshipTypes.DEPENDS_ON} {
          id: $dependencyId,
          type: $type,
          description: $description,
          createdAt: $createdAt
        }]->(target)
        RETURN r.id as id, source.id as sourceProjectId, target.id as targetProjectId, r.type as type, r.description as description
      `;
      const params = {
        sourceProjectId,
        targetProjectId,
        dependencyId,
        type,
        description,
        createdAt: Neo4jUtils.getCurrentTimestamp(),
      };
      const records = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, params);
        return result.records;
      });
      if (!records || records.length === 0) {
        throw new Error("Failed to create project dependency relationship");
      }
      const record = records[0];
      const dependency = {
        id: record.get("id"),
        sourceProjectId: record.get("sourceProjectId"),
        targetProjectId: record.get("targetProjectId"),
        type: record.get("type"),
        description: record.get("description"),
      };
      const reqContext_addDep = requestContextService.createRequestContext({
        operation: "addProjectDependency",
        sourceProjectId,
        targetProjectId,
        dependencyType: type,
      });
      logger.info("Project dependency added successfully", reqContext_addDep);
      return dependency;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "addProjectDependency.error",
        sourceProjectId,
        targetProjectId,
        dependencyType: type,
      });
      logger.error("Error adding project dependency", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Remove a dependency relationship between projects
   * @param dependencyId The ID of the dependency relationship to remove
   * @returns True if removed, false if not found
   */
  static async removeProjectDependency(dependencyId: string): Promise<boolean> {
    const session = await neo4jDriver.getSession();
    try {
      const query = `
        MATCH (source:${NodeLabels.Project})-[r:${RelationshipTypes.DEPENDS_ON} {id: $dependencyId}]->(target:${NodeLabels.Project})
        DELETE r
      `;
      // Use the write summary counters to determine whether anything was
      // actually deleted (MATCH may simply find nothing).
      const result = await session.executeWrite(async (tx) => {
        const res = await tx.run(query, { dependencyId });
        return res.summary.counters.updates().relationshipsDeleted > 0;
      });
      const reqContext_removeDep = requestContextService.createRequestContext({
        operation: "removeProjectDependency",
        dependencyId,
      });
      if (result) {
        logger.info(
          "Project dependency removed successfully",
          reqContext_removeDep,
        );
      } else {
        logger.warning(
          "Dependency not found or not removed",
          reqContext_removeDep,
        );
      }
      return result;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "removeProjectDependency.error",
        dependencyId,
      });
      logger.error("Error removing project dependency", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }

  /**
   * Get all dependencies for a project (both dependencies and dependents)
   * @param projectId Project ID
   * @returns Object containing dependencies and dependents
   * @throws If the project does not exist
   */
  static async getProjectDependencies(projectId: string): Promise<{
    dependencies: {
      id: string;
      sourceProjectId: string;
      targetProjectId: string;
      type: string;
      description: string;
      targetProject: {
        id: string;
        name: string;
        status: string;
      };
    }[];
    dependents: {
      id: string;
      sourceProjectId: string;
      targetProjectId: string;
      type: string;
      description: string;
      sourceProject: {
        id: string;
        name: string;
        status: string;
      };
    }[];
  }> {
    const session = await neo4jDriver.getSession();
    try {
      const exists = await Neo4jUtils.nodeExists(
        NodeLabels.Project,
        "id",
        projectId,
      );
      if (!exists) {
        throw new Error(`Project with ID ${projectId} not found`);
      }
      const dependenciesQuery = `
        MATCH (source:${NodeLabels.Project} {id: $projectId})-[r:${RelationshipTypes.DEPENDS_ON}]->(target:${NodeLabels.Project})
        RETURN r.id AS id,
               source.id AS sourceProjectId,
               target.id AS targetProjectId,
               r.type AS type,
               r.description AS description,
               target.name AS targetName,
               target.status AS targetStatus
        ORDER BY r.type, target.name
      `;
      const dependentsQuery = `
        MATCH (source:${NodeLabels.Project})-[r:${RelationshipTypes.DEPENDS_ON}]->(target:${NodeLabels.Project} {id: $projectId})
        RETURN r.id AS id,
               source.id AS sourceProjectId,
               target.id AS targetProjectId,
               r.type AS type,
               r.description AS description,
               source.name AS sourceName,
               source.status AS sourceStatus
        ORDER BY r.type, source.name
      `;
      // A Neo4j session supports at most one active transaction at a time,
      // so these two reads must run sequentially — running them in parallel
      // (e.g. via Promise.all) on the same session is invalid.
      const dependenciesResult = await session.executeRead(
        async (tx) => (await tx.run(dependenciesQuery, { projectId })).records,
      );
      const dependentsResult = await session.executeRead(
        async (tx) => (await tx.run(dependentsQuery, { projectId })).records,
      );
      const dependencies = dependenciesResult.map((record) => ({
        id: record.get("id"),
        sourceProjectId: record.get("sourceProjectId"),
        targetProjectId: record.get("targetProjectId"),
        type: record.get("type"),
        description: record.get("description"),
        targetProject: {
          id: record.get("targetProjectId"),
          name: record.get("targetName"),
          status: record.get("targetStatus"),
        },
      }));
      const dependents = dependentsResult.map((record) => ({
        id: record.get("id"),
        sourceProjectId: record.get("sourceProjectId"),
        targetProjectId: record.get("targetProjectId"),
        type: record.get("type"),
        description: record.get("description"),
        sourceProject: {
          id: record.get("sourceProjectId"),
          name: record.get("sourceName"),
          status: record.get("sourceStatus"),
        },
      }));
      return { dependencies, dependents };
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "getProjectDependencies.error",
        projectId,
      });
      logger.error("Error getting project dependencies", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
}
```
--------------------------------------------------------------------------------
/src/services/neo4j/knowledgeService.ts:
--------------------------------------------------------------------------------
```typescript
import { logger, requestContextService } from "../../utils/index.js"; // Updated import path
import { neo4jDriver } from "./driver.js";
import { generateId, escapeRelationshipType } from "./helpers.js";
import {
KnowledgeFilterOptions,
Neo4jKnowledge, // This type no longer has domain/citations
NodeLabels,
PaginatedResult,
RelationshipTypes,
} from "./types.js";
import { Neo4jUtils } from "./utils.js";
import { int } from "neo4j-driver"; // Import 'int' for pagination
/**
* Service for managing Knowledge entities in Neo4j
*/
export class KnowledgeService {
  /**
   * Add a new knowledge item linked to an existing project.
   *
   * Creates the Knowledge node, a CONTAINS_KNOWLEDGE relationship from the
   * project, and a BELONGS_TO_DOMAIN relationship to a Domain node (MERGEd by
   * name, so domains are created on first use). Citations, if provided, are
   * attached afterwards via {@link addCitations} in a separate write —
   * NOTE(review): this second write is not part of the same transaction, so a
   * failure there can leave a knowledge item without its citations.
   *
   * @param knowledge Input data, potentially including domain and citations
   *                  for relationship creation. `id` is optional and
   *                  generated (`know_<id>`) when omitted.
   * @returns The created knowledge item, including its domain and citations.
   * @throws If the project does not exist or `domain` is missing/blank.
   */
  static async addKnowledge(
    knowledge: Omit<Neo4jKnowledge, "id" | "createdAt" | "updatedAt"> & {
      id?: string;
      domain?: string;
      citations?: string[];
    },
  ): Promise<Neo4jKnowledge & { domain: string | null; citations: string[] }> {
    const session = await neo4jDriver.getSession();
    try {
      // The parent project must exist before anything is created.
      const projectExists = await Neo4jUtils.nodeExists(
        NodeLabels.Project,
        "id",
        knowledge.projectId,
      );
      if (!projectExists) {
        throw new Error(`Project with ID ${knowledge.projectId} not found`);
      }
      const knowledgeId = knowledge.id || `know_${generateId()}`;
      const now = Neo4jUtils.getCurrentTimestamp();
      // Input validation for domain: required, non-empty string.
      if (
        !knowledge.domain ||
        typeof knowledge.domain !== "string" ||
        knowledge.domain.trim() === ""
      ) {
        throw new Error("Domain is required to create a knowledge item.");
      }
      // Single Cypher statement: create the Knowledge node, the project link,
      // and the domain link. Domain and citations are modeled as
      // relationships, not node properties, hence their absence from CREATE.
      const query = `
        MATCH (p:${NodeLabels.Project} {id: $projectId})
        CREATE (k:${NodeLabels.Knowledge} {
          id: $id,
          projectId: $projectId,
          text: $text,
          tags: $tags,
          createdAt: $createdAt,
          updatedAt: $updatedAt
        })
        CREATE (p)-[r:${RelationshipTypes.CONTAINS_KNOWLEDGE}]->(k)
        // Create domain relationship
        MERGE (d:${NodeLabels.Domain} {name: $domain})
        ON CREATE SET d.createdAt = $createdAt
        CREATE (k)-[:${RelationshipTypes.BELONGS_TO_DOMAIN}]->(d)
        // Return properties including domain name
        WITH k, d
        RETURN k.id as id, k.projectId as projectId, k.text as text, k.tags as tags, k.createdAt as createdAt, k.updatedAt as updatedAt, d.name as domainName
      `;
      const params = {
        id: knowledgeId,
        projectId: knowledge.projectId,
        text: knowledge.text,
        tags: knowledge.tags || [],
        domain: knowledge.domain, // Domain needed for MERGE Domain node
        createdAt: now,
        updatedAt: now,
      };
      const result = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, params);
        return result.records;
      });
      const createdKnowledgeRecord = result[0];
      if (!createdKnowledgeRecord) {
        throw new Error(
          "Failed to create knowledge item or retrieve its properties",
        );
      }
      // Construct the Neo4jKnowledge object from the returned record.
      const baseKnowledge: Neo4jKnowledge = {
        id: createdKnowledgeRecord.get("id"),
        projectId: createdKnowledgeRecord.get("projectId"),
        text: createdKnowledgeRecord.get("text"),
        tags: createdKnowledgeRecord.get("tags") || [],
        createdAt: createdKnowledgeRecord.get("createdAt"),
        updatedAt: createdKnowledgeRecord.get("updatedAt"),
      };
      const domainName = createdKnowledgeRecord.get("domainName") as
        | string
        | null;
      let actualCitations: string[] = [];
      // Attach citations (separate write) using the caller-supplied list;
      // the response echoes that list rather than re-querying.
      const inputCitations = knowledge.citations;
      if (
        inputCitations &&
        Array.isArray(inputCitations) &&
        inputCitations.length > 0
      ) {
        await this.addCitations(knowledgeId, inputCitations);
        actualCitations = inputCitations; // Assume these are the citations for the response
      }
      const reqContext = requestContextService.createRequestContext({
        operation: "addKnowledge",
        knowledgeId: baseKnowledge.id,
        projectId: knowledge.projectId,
      });
      logger.info("Knowledge item created successfully", reqContext);
      // Return the extended object with domain and citations.
      return {
        ...baseKnowledge,
        domain: domainName || knowledge.domain || null, // Fallback to input domain if query somehow misses it
        citations: actualCitations,
      };
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "addKnowledge.error",
        knowledgeInput: knowledge,
      });
      logger.error("Error creating knowledge item", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
/**
* Link two knowledge items with a specified relationship type.
* @param sourceId ID of the source knowledge item
* @param targetId ID of the target knowledge item
* @param relationshipType The type of relationship to create (e.g., 'RELATED_TO', 'IS_SUBTOPIC_OF') - Validation needed
* @returns True if the link was created successfully, false otherwise
*/
static async linkKnowledgeToKnowledge(
sourceId: string,
targetId: string,
relationshipType: string,
): Promise<boolean> {
// TODO: Validate relationshipType against allowed types or RelationshipTypes enum
const session = await neo4jDriver.getSession();
const reqContext = requestContextService.createRequestContext({
operation: "linkKnowledgeToKnowledge",
sourceId,
targetId,
relationshipType,
});
logger.debug(
`Attempting to link knowledge ${sourceId} to ${targetId} with type ${relationshipType}`,
reqContext,
);
try {
const sourceExists = await Neo4jUtils.nodeExists(
NodeLabels.Knowledge,
"id",
sourceId,
);
const targetExists = await Neo4jUtils.nodeExists(
NodeLabels.Knowledge,
"id",
targetId,
);
if (!sourceExists || !targetExists) {
logger.warning(
`Cannot link knowledge: Source (${sourceId} exists: ${sourceExists}) or Target (${targetId} exists: ${targetExists}) not found.`,
{ ...reqContext, sourceExists, targetExists },
);
return false;
}
// Escape relationship type for safety
const escapedType = escapeRelationshipType(relationshipType);
const query = `
MATCH (source:${NodeLabels.Knowledge} {id: $sourceId})
MATCH (target:${NodeLabels.Knowledge} {id: $targetId})
MERGE (source)-[r:${escapedType}]->(target)
RETURN r
`;
const result = await session.executeWrite(async (tx) => {
const runResult = await tx.run(query, { sourceId, targetId });
return runResult.records;
});
const linkCreated = result.length > 0;
if (linkCreated) {
logger.info(
`Successfully linked knowledge ${sourceId} to ${targetId} with type ${relationshipType}`,
reqContext,
);
} else {
logger.warning(
`Failed to link knowledge ${sourceId} to ${targetId} (MERGE returned no relationship)`,
reqContext,
);
}
return linkCreated;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
logger.error("Error linking knowledge items", error as Error, {
...reqContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Get a knowledge item by ID, including its domain and citations via relationships.
* @param id Knowledge ID
* @returns The knowledge item with domain and citations added, or null if not found.
*/
static async getKnowledgeById(
id: string,
): Promise<
(Neo4jKnowledge & { domain: string | null; citations: string[] }) | null
> {
const session = await neo4jDriver.getSession();
try {
// Fetch domain and citations via relationships
const query = `
MATCH (k:${NodeLabels.Knowledge} {id: $id})
OPTIONAL MATCH (k)-[:${RelationshipTypes.BELONGS_TO_DOMAIN}]->(d:${NodeLabels.Domain})
OPTIONAL MATCH (k)-[:${RelationshipTypes.CITES}]->(c:${NodeLabels.Citation})
RETURN k.id as id,
k.projectId as projectId,
k.text as text,
k.tags as tags,
d.name as domainName, // Fetch domain name
collect(DISTINCT c.source) as citationSources, // Collect distinct citation sources
k.createdAt as createdAt,
k.updatedAt as updatedAt
`;
const result = await session.executeRead(async (tx) => {
const result = await tx.run(query, { id });
return result.records;
});
if (result.length === 0) {
return null;
}
const record = result[0];
// Construct the base Neo4jKnowledge object
const knowledge: Neo4jKnowledge = {
id: record.get("id"),
projectId: record.get("projectId"),
text: record.get("text"),
tags: record.get("tags") || [],
createdAt: record.get("createdAt"),
updatedAt: record.get("updatedAt"),
};
// Add domain and citations fetched via relationships
const domain = record.get("domainName");
const citations = record
.get("citationSources")
.filter((c: string | null): c is string => c !== null); // Filter nulls if no citations found
return {
...knowledge,
domain: domain, // Can be null if no domain relationship exists
citations: citations,
};
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const reqContext = requestContextService.createRequestContext({
operation: "getKnowledgeById.error",
knowledgeId: id,
});
logger.error("Error getting knowledge by ID", error as Error, {
...reqContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
  /**
   * Update a knowledge item, including domain and citation relationships.
   *
   * Behavior (as implemented below):
   * - Node properties: only keys in the allow-list are applied; `updatedAt`
   *   is always refreshed.
   * - Domain: a non-empty `domain` string deletes any existing
   *   BELONGS_TO_DOMAIN relationship and creates a new one (the Domain node
   *   is MERGEd, so it is created on first use). A falsy `domain` is ignored,
   *   so a domain cannot be removed through this method.
   * - Citations: when `citations` is provided (even empty), all existing
   *   CITES relationships are deleted first, then new Citation nodes are
   *   created for each entry. This happens in separate transactions from the
   *   property/domain update, so the overall operation is NOT atomic.
   * @param id Knowledge ID
   * @param updates Updates including optional domain and citations
   * @returns The updated knowledge item (without domain/citations properties)
   * @throws Error if the knowledge item does not exist, the domain value is
   *   empty/whitespace, or the update query returns no record
   */
  static async updateKnowledge(
    id: string,
    updates: Partial<
      Omit<Neo4jKnowledge, "id" | "projectId" | "createdAt" | "updatedAt">
    > & { domain?: string; citations?: string[] },
  ): Promise<Neo4jKnowledge> {
    const session = await neo4jDriver.getSession();
    try {
      const exists = await Neo4jUtils.nodeExists(
        NodeLabels.Knowledge,
        "id",
        id,
      );
      if (!exists) {
        throw new Error(`Knowledge with ID ${id} not found`);
      }
      const updateParams: Record<string, any> = {
        id,
        updatedAt: Neo4jUtils.getCurrentTimestamp(),
      };
      let setClauses = ["k.updatedAt = $updatedAt"];
      // NOTE(review): 'projectId' is listed here, but the `updates` type above
      // omits it, so that branch is only reachable if a caller bypasses the
      // types — confirm whether projectId updates are intended.
      const allowedProperties: (keyof Neo4jKnowledge)[] = [
        "projectId",
        "text",
        "tags",
      ]; // Define properties that can be updated
      // Add update clauses for allowed properties defined in Neo4jKnowledge
      for (const [key, value] of Object.entries(updates)) {
        // Check if the key is one of the allowed properties and value is defined
        if (
          value !== undefined &&
          allowedProperties.includes(key as keyof Neo4jKnowledge)
        ) {
          updateParams[key] = value;
          // Key is allow-listed above, so interpolating it here is safe.
          setClauses.push(`k.${key} = $${key}`);
        }
      }
      // Handle domain update using relationships
      let domainUpdateClause = "";
      const domainUpdateValue = updates.domain;
      if (domainUpdateValue) {
        if (
          typeof domainUpdateValue !== "string" ||
          domainUpdateValue.trim() === ""
        ) {
          throw new Error("Domain update value cannot be empty.");
        }
        updateParams.domain = domainUpdateValue;
        domainUpdateClause = `
        // Update domain relationship
        WITH k // Ensure k is in scope
        OPTIONAL MATCH (k)-[oldDomainRel:${RelationshipTypes.BELONGS_TO_DOMAIN}]->(:${NodeLabels.Domain})
        DELETE oldDomainRel
        MERGE (newDomain:${NodeLabels.Domain} {name: $domain})
        ON CREATE SET newDomain.createdAt = $updatedAt // Set timestamp if domain is new
        CREATE (k)-[:${RelationshipTypes.BELONGS_TO_DOMAIN}]->(newDomain)
      `;
      }
      // Construct the main update query
      const query = `
        MATCH (k:${NodeLabels.Knowledge} {id: $id})
        ${setClauses.length > 0 ? `SET ${setClauses.join(", ")}` : ""}
        ${domainUpdateClause}
        // Return basic properties defined in Neo4jKnowledge
        RETURN k.id as id, k.projectId as projectId, k.text as text, k.tags as tags, k.createdAt as createdAt, k.updatedAt as updatedAt
      `;
      const result = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, updateParams);
        return result.records;
      });
      const updatedKnowledgeRecord = result[0];
      if (!updatedKnowledgeRecord) {
        throw new Error("Failed to update knowledge item or retrieve result");
      }
      // Update citations if provided in the input 'updates' object.
      // NOTE: this runs outside the transaction above, so a failure here
      // leaves the property update already committed.
      const inputCitations = updates.citations;
      if (inputCitations && Array.isArray(inputCitations)) {
        // Remove existing CITES relationships first
        await session.executeWrite(async (tx) => {
          await tx.run(
            `
            MATCH (k:${NodeLabels.Knowledge} {id: $id})-[r:${RelationshipTypes.CITES}]->(:${NodeLabels.Citation})
            DELETE r
            `,
            { id },
          );
        });
        // Add new CITES relationships
        if (inputCitations.length > 0) {
          await this.addCitations(id, inputCitations);
        }
      }
      // Construct the final return object matching Neo4jKnowledge
      const finalUpdatedKnowledge: Neo4jKnowledge = {
        id: updatedKnowledgeRecord.get("id"),
        projectId: updatedKnowledgeRecord.get("projectId"),
        text: updatedKnowledgeRecord.get("text"),
        tags: updatedKnowledgeRecord.get("tags") || [],
        createdAt: updatedKnowledgeRecord.get("createdAt"),
        updatedAt: updatedKnowledgeRecord.get("updatedAt"),
      };
      const reqContext_update = requestContextService.createRequestContext({
        operation: "updateKnowledge",
        knowledgeId: id,
      });
      logger.info("Knowledge item updated successfully", reqContext_update);
      return finalUpdatedKnowledge;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "updateKnowledge.error",
        knowledgeId: id,
        updatesApplied: updates,
      });
      logger.error("Error updating knowledge item", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
/**
* Delete a knowledge item
* @param id Knowledge ID
* @returns True if deleted, false if not found
*/
static async deleteKnowledge(id: string): Promise<boolean> {
const session = await neo4jDriver.getSession();
try {
const exists = await Neo4jUtils.nodeExists(
NodeLabels.Knowledge,
"id",
id,
);
if (!exists) {
return false;
}
// Use DETACH DELETE to remove the node and all its relationships
const query = `
MATCH (k:${NodeLabels.Knowledge} {id: $id})
DETACH DELETE k
`;
await session.executeWrite(async (tx) => {
await tx.run(query, { id });
});
const reqContext_delete = requestContextService.createRequestContext({
operation: "deleteKnowledge",
knowledgeId: id,
});
logger.info("Knowledge item deleted successfully", reqContext_delete);
return true;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "deleteKnowledge.error",
knowledgeId: id,
});
logger.error("Error deleting knowledge item", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Get knowledge items for a project with optional filtering and server-side pagination.
* Returns domain and citations via relationships.
* @param options Filter and pagination options
* @returns Paginated list of knowledge items including domain and citations
*/
static async getKnowledge(
options: KnowledgeFilterOptions,
): Promise<
PaginatedResult<
Neo4jKnowledge & { domain: string | null; citations: string[] }
>
> {
const session = await neo4jDriver.getSession();
try {
let conditions: string[] = [];
const params: Record<string, any> = {}; // Initialize empty params
// Conditionally add projectId to params if it's not '*'
if (options.projectId && options.projectId !== "*") {
params.projectId = options.projectId;
}
let domainMatchClause = "";
if (options.domain) {
params.domain = options.domain;
// Match the relationship for filtering
domainMatchClause = `MATCH (k)-[:${RelationshipTypes.BELONGS_TO_DOMAIN}]->(d:${NodeLabels.Domain} {name: $domain})`;
} else {
// Optionally match domain to return it
domainMatchClause = `OPTIONAL MATCH (k)-[:${RelationshipTypes.BELONGS_TO_DOMAIN}]->(d:${NodeLabels.Domain})`;
}
// Handle tags filtering
if (options.tags && options.tags.length > 0) {
const tagQuery = Neo4jUtils.generateArrayInListQuery(
"k",
"tags",
"tagsList",
options.tags,
);
if (tagQuery.cypher) {
conditions.push(tagQuery.cypher);
Object.assign(params, tagQuery.params);
}
}
// Handle text search (using regex - consider full-text index later)
if (options.search) {
// Use case-insensitive regex
params.search = `(?i).*${options.search.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}.*`;
conditions.push("k.text =~ $search");
}
const whereClause =
conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
// Calculate pagination parameters
const page = Math.max(options.page || 1, 1);
const limit = Math.min(Math.max(options.limit || 20, 1), 100);
const skip = (page - 1) * limit;
// Add pagination params using neo4j.int
params.skip = int(skip);
params.limit = int(limit);
// Construct the base MATCH clause conditionally
const projectIdMatchFilter =
options.projectId && options.projectId !== "*"
? "{projectId: $projectId}"
: "";
const baseMatch = `MATCH (k:${NodeLabels.Knowledge} ${projectIdMatchFilter})`;
// Query for total count matching filters
const countQuery = `
${baseMatch} // Use conditional base match
${whereClause} // Apply filters to the knowledge node 'k' first
WITH k // Pass the filtered knowledge nodes
${domainMatchClause} // Now match domain relationship if needed for filtering
RETURN count(DISTINCT k) as total // Count distinct knowledge nodes
`;
// Query for paginated data
const dataQuery = `
${baseMatch} // Use conditional base match
${whereClause} // Apply filters to the knowledge node 'k' first
WITH k // Pass the filtered knowledge nodes
${domainMatchClause} // Match domain relationship
OPTIONAL MATCH (k)-[:${RelationshipTypes.CITES}]->(c:${NodeLabels.Citation}) // Match citations
WITH k, d, collect(DISTINCT c.source) as citationSources // Collect citations
RETURN k.id as id,
k.projectId as projectId,
k.text as text,
k.tags as tags,
d.name as domainName, // Return domain name from relationship
citationSources, // Return collected citations
k.createdAt as createdAt,
k.updatedAt as updatedAt
ORDER BY k.createdAt DESC
SKIP $skip
LIMIT $limit
`;
// Execute count query
const totalResult = await session.executeRead(async (tx) => {
// Need to remove skip/limit from params for count query
const countParams = { ...params };
delete countParams.skip;
delete countParams.limit;
const result = await tx.run(countQuery, countParams);
// The driver seems to return a standard number for count(), use ?? 0 for safety
return result.records[0]?.get("total") ?? 0;
});
// totalResult is now the standard number returned by executeRead
const total = totalResult;
// Execute data query
const dataResult = await session.executeRead(async (tx) => {
const result = await tx.run(dataQuery, params); // Use params with skip/limit
return result.records;
});
// Map results including domain and citations
const knowledgeItems = dataResult.map((record) => {
const baseKnowledge: Neo4jKnowledge = {
id: record.get("id"),
projectId: record.get("projectId"),
text: record.get("text"),
tags: record.get("tags") || [],
createdAt: record.get("createdAt"),
updatedAt: record.get("updatedAt"),
};
const domain = record.get("domainName");
const citations = record
.get("citationSources")
.filter((c: string | null): c is string => c !== null);
return {
...baseKnowledge,
domain: domain,
citations: citations,
};
});
// Return paginated result structure
const totalPages = Math.ceil(total / limit);
return {
data: knowledgeItems,
total,
page,
limit,
totalPages,
};
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const reqContext_get = requestContextService.createRequestContext({
operation: "getKnowledge.error",
filterOptions: options,
});
logger.error("Error getting knowledge items", error as Error, {
...reqContext_get,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Get all available domains with item counts
* @returns Array of domains with counts
*/
static async getDomains(): Promise<Array<{ name: string; count: number }>> {
const session = await neo4jDriver.getSession();
try {
// This query correctly uses the relationship already
const query = `
MATCH (d:${NodeLabels.Domain})<-[:${RelationshipTypes.BELONGS_TO_DOMAIN}]-(k:${NodeLabels.Knowledge})
RETURN d.name AS name, count(k) AS count
ORDER BY count DESC, name
`;
const result = await session.executeRead(async (tx) => {
const result = await tx.run(query);
return result.records;
});
return result.map((record) => ({
name: record.get("name"),
count: record.get("count").toNumber(), // Convert Neo4j int
}));
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const reqContext_domains = requestContextService.createRequestContext({
operation: "getDomains.error",
});
logger.error("Error getting domains", error as Error, {
...reqContext_domains,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Get all unique tags used across knowledge items with counts
* @param projectId Optional project ID to filter tags
* @returns Array of tags with counts
*/
static async getTags(
projectId?: string,
): Promise<Array<{ tag: string; count: number }>> {
const session = await neo4jDriver.getSession();
try {
let whereClause = "";
const params: Record<string, any> = {};
if (projectId) {
whereClause = "WHERE k.projectId = $projectId";
params.projectId = projectId;
}
// This query is fine as it only reads the tags property
const query = `
MATCH (k:${NodeLabels.Knowledge})
${whereClause}
UNWIND k.tags AS tag
RETURN tag, count(*) AS count
ORDER BY count DESC, tag
`;
const result = await session.executeRead(async (tx) => {
const result = await tx.run(query, params);
return result.records;
});
return result.map((record) => ({
tag: record.get("tag"),
count: record.get("count").toNumber(), // Convert Neo4j int
}));
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const reqContext_tags = requestContextService.createRequestContext({
operation: "getTags.error",
projectId,
});
logger.error("Error getting tags", error as Error, {
...reqContext_tags,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Add CITES relationships from a knowledge item to new Citation nodes.
* @param knowledgeId Knowledge ID
* @param citations Array of citation source strings
* @returns The IDs of the created Citation nodes
* @private
*/
private static async addCitations(
knowledgeId: string,
citations: string[],
): Promise<string[]> {
if (!citations || citations.length === 0) {
return [];
}
const session = await neo4jDriver.getSession();
try {
const citationData = citations.map((source) => ({
id: `cite_${generateId()}`,
source: source,
createdAt: Neo4jUtils.getCurrentTimestamp(),
}));
const query = `
MATCH (k:${NodeLabels.Knowledge} {id: $knowledgeId})
UNWIND $citationData as citationProps
CREATE (c:${NodeLabels.Citation})
SET c = citationProps
CREATE (k)-[:${RelationshipTypes.CITES}]->(c)
RETURN c.id as citationId
`;
const result = await session.executeWrite(async (tx) => {
const res = await tx.run(query, { knowledgeId, citationData });
return res.records.map((r) => r.get("citationId"));
});
const reqContext_addCite = requestContextService.createRequestContext({
operation: "addCitations",
knowledgeId,
citationCount: result.length,
});
logger.debug(
`Added ${result.length} citations for knowledge ${knowledgeId}`,
reqContext_addCite,
);
return result;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "addCitations.error",
knowledgeId,
citations,
});
logger.error("Error adding citations", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
}
```
--------------------------------------------------------------------------------
/src/services/neo4j/taskService.ts:
--------------------------------------------------------------------------------
```typescript
import { int } from "neo4j-driver"; // Import 'int' for pagination
import { logger, requestContextService } from "../../utils/index.js"; // Updated import path
import { neo4jDriver } from "./driver.js";
import { generateId, buildListQuery } from "./helpers.js"; // Import buildListQuery
import {
Neo4jTask, // This type no longer has assignedTo
NodeLabels,
PaginatedResult,
RelationshipTypes,
TaskFilterOptions,
} from "./types.js";
import { Neo4jUtils } from "./utils.js";
/**
* Service for managing Task entities in Neo4j
*/
export class TaskService {
  /**
   * Create a new task under a project and optionally assign it to a user.
   *
   * Notes (grounded in the code below):
   * - Throws if the target project does not exist.
   * - When `assignedTo` is provided, the User node is MERGEd (created on
   *   demand, with createdAt set on first creation) and an ASSIGNED_TO
   *   relationship is created from the task.
   * - `urls` is stored on the node as a JSON string and deserialized before
   *   returning.
   * @param task Task data, including optional assignedTo for relationship creation
   * @returns The created task, plus `assignedToUserId` (null when unassigned)
   * @throws Error if the project is missing or the create query returns no record
   */
  static async createTask(
    task: Omit<Neo4jTask, "id" | "createdAt" | "updatedAt"> & {
      id?: string;
      assignedTo?: string;
    },
  ): Promise<Neo4jTask & { assignedToUserId?: string | null }> {
    const session = await neo4jDriver.getSession();
    try {
      const projectExists = await Neo4jUtils.nodeExists(
        NodeLabels.Project,
        "id",
        task.projectId,
      );
      if (!projectExists) {
        throw new Error(`Project with ID ${task.projectId} not found`);
      }
      // Caller-supplied ID wins; otherwise generate one.
      const taskId = task.id || `task_${generateId()}`;
      const now = Neo4jUtils.getCurrentTimestamp();
      const assignedToUserId = task.assignedTo; // Get assignee from input
      // No longer check if user exists here, will use MERGE later
      // Serialize urls to JSON string
      const query = `
      MATCH (p:${NodeLabels.Project} {id: $projectId})
      CREATE (t:${NodeLabels.Task} {
        id: $id,
        projectId: $projectId,
        title: $title,
        description: $description,
        priority: $priority,
        status: $status,
        // assignedTo removed
        urls: $urls,
        tags: $tags,
        completionRequirements: $completionRequirements,
        outputFormat: $outputFormat,
        taskType: $taskType,
        createdAt: $createdAt,
        updatedAt: $updatedAt
      })
      CREATE (p)-[:${RelationshipTypes.CONTAINS_TASK}]->(t)
      // Optionally create ASSIGNED_TO relationship using MERGE for the User node
      WITH t
      ${assignedToUserId ? `MERGE (u:${NodeLabels.User} {id: $assignedToUserId}) ON CREATE SET u.createdAt = $createdAt CREATE (t)-[:${RelationshipTypes.ASSIGNED_TO}]->(u)` : ""}
      // Return properties defined in Neo4jTask
      WITH t // Ensure t is in scope before optional match
      OPTIONAL MATCH (t)-[:${RelationshipTypes.ASSIGNED_TO}]->(assigned_user:${NodeLabels.User}) // Match to get assigned user's ID
      RETURN t.id as id,
             t.projectId as projectId,
             t.title as title,
             t.description as description,
             t.priority as priority,
             t.status as status,
             assigned_user.id as assignedToUserId, // Add this
             t.urls as urls,
             t.tags as tags,
             t.completionRequirements as completionRequirements,
             t.outputFormat as outputFormat,
             t.taskType as taskType,
             t.createdAt as createdAt,
             t.updatedAt as updatedAt
      `;
      // Serialize urls to JSON string
      const params: Record<string, any> = {
        id: taskId,
        projectId: task.projectId,
        title: task.title,
        description: task.description,
        priority: task.priority,
        status: task.status,
        // assignedTo removed from params
        urls: JSON.stringify(task.urls || []), // Serialize urls
        tags: task.tags || [],
        completionRequirements: task.completionRequirements,
        outputFormat: task.outputFormat,
        taskType: task.taskType,
        createdAt: now,
        updatedAt: now,
      };
      if (assignedToUserId) {
        params.assignedToUserId = assignedToUserId;
      }
      const result = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, params);
        // Use .get() for each field
        return result.records.length > 0 ? result.records[0] : null;
      });
      if (!result) {
        throw new Error("Failed to create task or retrieve its properties");
      }
      // Construct the Neo4jTask object - deserialize urls
      const createdTaskData: Neo4jTask & { assignedToUserId?: string | null } =
        {
          id: result.get("id"),
          projectId: result.get("projectId"),
          title: result.get("title"),
          description: result.get("description"),
          priority: result.get("priority"),
          status: result.get("status"),
          urls: JSON.parse(result.get("urls") || "[]"), // Deserialize urls
          tags: result.get("tags") || [],
          completionRequirements: result.get("completionRequirements"),
          outputFormat: result.get("outputFormat"),
          taskType: result.get("taskType"),
          createdAt: result.get("createdAt"),
          updatedAt: result.get("updatedAt"),
          assignedToUserId: result.get("assignedToUserId") || null,
        };
      const reqContext_create = requestContextService.createRequestContext({
        operation: "createTask",
        taskId: createdTaskData.id,
        projectId: task.projectId,
        assignedToUserId,
      });
      logger.info("Task created successfully", reqContext_create);
      return createdTaskData;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "createTask.error",
        taskInput: task,
      });
      logger.error("Error creating task", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
/**
* Link a Task to a Knowledge item with a specified relationship type.
* @param taskId ID of the source Task item
* @param knowledgeId ID of the target Knowledge item
* @param relationshipType The type of relationship to create (e.g., 'ADDRESSES', 'REFERENCES') - Validation needed
* @returns True if the link was created successfully, false otherwise
*/
static async linkTaskToKnowledge(
taskId: string,
knowledgeId: string,
relationshipType: string,
): Promise<boolean> {
// TODO: Validate relationshipType against allowed types or RelationshipTypes enum
const session = await neo4jDriver.getSession();
const reqContext_link = requestContextService.createRequestContext({
operation: "linkTaskToKnowledge",
taskId,
knowledgeId,
relationshipType,
});
logger.debug(
`Attempting to link task ${taskId} to knowledge ${knowledgeId} with type ${relationshipType}`,
reqContext_link,
);
try {
const taskExists = await Neo4jUtils.nodeExists(
NodeLabels.Task,
"id",
taskId,
);
const knowledgeExists = await Neo4jUtils.nodeExists(
NodeLabels.Knowledge,
"id",
knowledgeId,
);
if (!taskExists || !knowledgeExists) {
logger.warning(
`Cannot link: Task (${taskId} exists: ${taskExists}) or Knowledge (${knowledgeId} exists: ${knowledgeExists}) not found.`,
{ ...reqContext_link, taskExists, knowledgeExists },
);
return false;
}
const escapedType = `\`${relationshipType.replace(/`/g, "``")}\``;
const query = `
MATCH (task:${NodeLabels.Task} {id: $taskId})
MATCH (knowledge:${NodeLabels.Knowledge} {id: $knowledgeId})
MERGE (task)-[r:${escapedType}]->(knowledge)
RETURN r
`;
const result = await session.executeWrite(async (tx) => {
const runResult = await tx.run(query, { taskId, knowledgeId });
return runResult.records;
});
const linkCreated = result.length > 0;
if (linkCreated) {
logger.info(
`Successfully linked task ${taskId} to knowledge ${knowledgeId} with type ${relationshipType}`,
reqContext_link,
);
} else {
logger.warning(
`Failed to link task ${taskId} to knowledge ${knowledgeId} (MERGE returned no relationship)`,
reqContext_link,
);
}
return linkCreated;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
logger.error("Error linking task to knowledge item", error as Error, {
...reqContext_link,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Get a task by ID, including the assigned user ID via relationship.
* @param id Task ID
* @returns The task with assignedToUserId property, or null if not found.
*/
static async getTaskById(
id: string,
): Promise<(Neo4jTask & { assignedToUserId: string | null }) | null> {
const session = await neo4jDriver.getSession();
try {
// Retrieve urls as JSON string
const query = `
MATCH (t:${NodeLabels.Task} {id: $id})
OPTIONAL MATCH (t)-[:${RelationshipTypes.ASSIGNED_TO}]->(u:${NodeLabels.User})
RETURN t.id as id,
t.projectId as projectId,
t.title as title,
t.description as description,
t.priority as priority,
t.status as status,
u.id as assignedToUserId,
t.urls as urls,
t.tags as tags,
t.completionRequirements as completionRequirements,
t.outputFormat as outputFormat,
t.taskType as taskType,
t.createdAt as createdAt,
t.updatedAt as updatedAt
`;
const result = await session.executeRead(async (tx) => {
const result = await tx.run(query, { id });
return result.records;
});
if (result.length === 0) {
return null;
}
const record = result[0];
// Construct the base Neo4jTask object - deserialize urls
const taskData: Neo4jTask = {
id: record.get("id"),
projectId: record.get("projectId"),
title: record.get("title"),
description: record.get("description"),
priority: record.get("priority"),
status: record.get("status"),
urls: JSON.parse(record.get("urls") || "[]"), // Deserialize urls
tags: record.get("tags") || [],
completionRequirements: record.get("completionRequirements"),
outputFormat: record.get("outputFormat"),
taskType: record.get("taskType"),
createdAt: record.get("createdAt"),
updatedAt: record.get("updatedAt"),
};
const assignedToUserId = record.get("assignedToUserId");
return {
...taskData,
assignedToUserId: assignedToUserId,
};
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "getTaskById.error",
taskId: id,
});
logger.error("Error getting task by ID", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Check if all dependencies of a task are completed
* @param taskId Task ID to check dependencies for
* @returns True if all dependencies are completed, false otherwise
*/
static async areAllDependenciesCompleted(taskId: string): Promise<boolean> {
const session = await neo4jDriver.getSession();
try {
const query = `
MATCH (t:${NodeLabels.Task} {id: $taskId})-[:${RelationshipTypes.DEPENDS_ON}]->(dep:${NodeLabels.Task})
WHERE dep.status <> 'completed'
RETURN count(dep) AS incompleteCount
`;
const result = await session.executeRead(async (tx) => {
const result = await tx.run(query, { taskId });
// Use standard number directly, Neo4j count() returns a number, not an Integer object
return result.records[0]?.get("incompleteCount") || 0;
});
return result === 0;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "areAllDependenciesCompleted.error",
taskId,
});
logger.error(
"Error checking task dependencies completion status",
error as Error,
{ ...errorContext, detail: errorMessage },
);
throw error;
} finally {
await session.close();
}
}
  /**
   * Update a task's properties and handle assignment changes via relationships.
   *
   * Behavior (as implemented below):
   * - Refuses to set status to 'in-progress' or 'completed' unless every
   *   DEPENDS_ON dependency is completed.
   * - Only allow-listed properties are updated; `urls` is serialized to a
   *   JSON string for storage and parsed back before returning.
   * - `assignedTo: null` removes the ASSIGNED_TO relationship;
   *   `assignedTo: <id>` re-points it (MERGEing the User node on demand);
   *   omitting `assignedTo` leaves the assignment untouched.
   * @param id Task ID
   * @param updates Task updates, including optional assignedTo for relationship changes
   * @returns The updated task (without assignedTo property)
   * @throws Error if the task is missing, dependencies are incomplete, or the
   *   update query returns no record
   */
  static async updateTask(
    id: string,
    updates: Partial<
      Omit<Neo4jTask, "id" | "projectId" | "createdAt" | "updatedAt">
    > & { assignedTo?: string | null },
  ): Promise<Neo4jTask> {
    const session = await neo4jDriver.getSession();
    try {
      const exists = await Neo4jUtils.nodeExists(NodeLabels.Task, "id", id);
      if (!exists) {
        throw new Error(`Task with ID ${id} not found`);
      }
      // Gate status transitions on dependency completion.
      if (updates.status === "in-progress" || updates.status === "completed") {
        const depsCompleted = await this.areAllDependenciesCompleted(id);
        if (!depsCompleted) {
          throw new Error(
            `Cannot mark task as ${updates.status} because not all dependencies are completed`,
          );
        }
      }
      const updateParams: Record<string, any> = {
        id,
        updatedAt: Neo4jUtils.getCurrentTimestamp(),
      };
      let setClauses = ["t.updatedAt = $updatedAt"];
      // NOTE(review): 'projectId' is listed here, but the `updates` type above
      // omits it, so that branch is only reachable if a caller bypasses the
      // types — confirm whether projectId updates are intended.
      const allowedProperties: (keyof Neo4jTask)[] = [
        "projectId",
        "title",
        "description",
        "priority",
        "status",
        "urls",
        "tags",
        "completionRequirements",
        "outputFormat",
        "taskType",
      ];
      // Handle property updates - serialize urls if present
      for (const [key, value] of Object.entries(updates)) {
        if (
          value !== undefined &&
          key !== "assignedTo" &&
          allowedProperties.includes(key as keyof Neo4jTask)
        ) {
          // Serialize urls array to JSON string if it's the key being updated
          updateParams[key] =
            key === "urls" ? JSON.stringify(value || []) : value;
          // Key is allow-listed above, so interpolating it here is safe.
          setClauses.push(`t.${key} = $${key}`);
        }
      }
      // Handle assignment change (logic remains the same)
      let assignmentClause = "";
      const newAssigneeId = updates.assignedTo;
      if (newAssigneeId !== undefined) {
        // Check if assignedTo is part of the update
        if (newAssigneeId === null) {
          // Unassign: Delete existing relationship
          assignmentClause = `
          WITH t
          OPTIONAL MATCH (t)-[oldRel:${RelationshipTypes.ASSIGNED_TO}]->(:${NodeLabels.User})
          DELETE oldRel
        `;
        } else {
          // Assign/Reassign: Use MERGE for the user node
          updateParams.newAssigneeId = newAssigneeId;
          assignmentClause = `
          WITH t
          OPTIONAL MATCH (t)-[oldRel:${RelationshipTypes.ASSIGNED_TO}]->(:${NodeLabels.User})
          DELETE oldRel
          WITH t
          MERGE (newUser:${NodeLabels.User} {id: $newAssigneeId})
          ON CREATE SET newUser.createdAt = $updatedAt
          CREATE (t)-[:${RelationshipTypes.ASSIGNED_TO}]->(newUser)
        `;
        }
      }
      // Retrieve urls as JSON string
      const query = `
        MATCH (t:${NodeLabels.Task} {id: $id})
        ${setClauses.length > 0 ? `SET ${setClauses.join(", ")}` : ""}
        ${assignmentClause}
        // Return properties defined in Neo4jTask
        RETURN t.id as id,
               t.projectId as projectId,
               t.title as title,
               t.description as description,
               t.priority as priority,
               t.status as status,
               t.urls as urls,
               t.tags as tags,
               t.completionRequirements as completionRequirements,
               t.outputFormat as outputFormat,
               t.taskType as taskType,
               t.createdAt as createdAt,
               t.updatedAt as updatedAt
      `;
      const result = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, updateParams);
        // Use .get() for each field
        return result.records.length > 0 ? result.records[0] : null;
      });
      if (!result) {
        throw new Error("Failed to update task or retrieve its properties");
      }
      // Construct the Neo4jTask object - deserialize urls
      const updatedTaskData: Neo4jTask = {
        id: result.get("id"),
        projectId: result.get("projectId"),
        title: result.get("title"),
        description: result.get("description"),
        priority: result.get("priority"),
        status: result.get("status"),
        urls: JSON.parse(result.get("urls") || "[]"), // Deserialize urls
        tags: result.get("tags") || [],
        completionRequirements: result.get("completionRequirements"),
        outputFormat: result.get("outputFormat"),
        taskType: result.get("taskType"),
        createdAt: result.get("createdAt"),
        updatedAt: result.get("updatedAt"),
      };
      const reqContext_update = requestContextService.createRequestContext({
        operation: "updateTask",
        taskId: id,
      });
      logger.info("Task updated successfully", reqContext_update);
      return updatedTaskData;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "updateTask.error",
        taskId: id,
        updatesApplied: updates,
      });
      logger.error("Error updating task", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
/**
* Delete a task
* @param id Task ID
* @returns True if deleted, false if not found
*/
static async deleteTask(id: string): Promise<boolean> {
const session = await neo4jDriver.getSession();
try {
const exists = await Neo4jUtils.nodeExists(NodeLabels.Task, "id", id);
if (!exists) {
return false;
}
const query = `
MATCH (t:${NodeLabels.Task} {id: $id})
DETACH DELETE t
`;
await session.executeWrite(async (tx) => {
await tx.run(query, { id });
});
const reqContext_delete = requestContextService.createRequestContext({
operation: "deleteTask",
taskId: id,
});
logger.info("Task deleted successfully", reqContext_delete);
return true;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "deleteTask.error",
taskId: id,
});
logger.error("Error deleting task", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
  /**
   * Get tasks for a project with optional filtering and server-side pagination.
   * Each returned task also carries the assigned user's ID (resolved via the
   * ASSIGNED_TO relationship), or null when the task is unassigned.
   * @param options Filter and pagination options
   * @returns Paginated list of tasks including assignedToUserId
   * @throws If the count or data query fails
   */
  static async getTasks(
    options: TaskFilterOptions,
  ): Promise<PaginatedResult<Neo4jTask & { assignedToUserId: string | null }>> {
    const session = await neo4jDriver.getSession();
    try {
      const nodeAlias = "t";
      const userAlias = "u"; // Alias for the User node
      // Define how to match the assigned user relationship. OPTIONAL MATCH
      // keeps unassigned tasks in the result set (their assignedToUserId
      // projects as null).
      let assignmentMatchClause = `OPTIONAL MATCH (${nodeAlias})-[:${RelationshipTypes.ASSIGNED_TO}]->(${userAlias}:${NodeLabels.User})`;
      if (options.assignedTo) {
        // If filtering by assignee, make the MATCH non-optional and filter by
        // user ID, so only tasks assigned to that user survive the match.
        assignmentMatchClause = `MATCH (${nodeAlias})-[:${RelationshipTypes.ASSIGNED_TO}]->(${userAlias}:${NodeLabels.User} {id: $assignedTo})`;
      }
      // Scalar projections to return from the query (no raw nodes).
      const returnProperties = [
        `${nodeAlias}.id as id`,
        `${nodeAlias}.projectId as projectId`,
        `${nodeAlias}.title as title`,
        `${nodeAlias}.description as description`,
        `${nodeAlias}.priority as priority`,
        `${nodeAlias}.status as status`,
        `${userAlias}.id as assignedToUserId`, // Get user ID from the relationship
        `${nodeAlias}.urls as urls`,
        `${nodeAlias}.tags as tags`,
        `${nodeAlias}.completionRequirements as completionRequirements`,
        `${nodeAlias}.outputFormat as outputFormat`,
        `${nodeAlias}.taskType as taskType`,
        `${nodeAlias}.createdAt as createdAt`,
        `${nodeAlias}.updatedAt as updatedAt`,
      ];
      // Delegate WHERE / ORDER BY / SKIP / LIMIT construction to the shared
      // buildListQuery helper; it returns matching count and data queries
      // plus a single params object for both.
      const { countQuery, dataQuery, params } = buildListQuery(
        NodeLabels.Task,
        returnProperties,
        {
          // Filters
          projectId: options.projectId, // Pass projectId filter
          status: options.status,
          priority: options.priority,
          assignedTo: options.assignedTo, // Pass assignedTo for potential filtering in helper/match clause
          tags: options.tags,
          taskType: options.taskType,
        },
        {
          // Pagination
          sortBy: options.sortBy,
          sortDirection: options.sortDirection,
          page: options.page,
          limit: options.limit,
        },
        nodeAlias, // Primary node alias
        assignmentMatchClause, // Additional MATCH clause for assignment
      );
      const reqContext_list = requestContextService.createRequestContext({
        operation: "getTasks",
        filterOptions: options,
      });
      // Execute count query. The two reads below are awaited sequentially, so
      // the single session never runs concurrent transactions.
      const totalResult = await session.executeRead(async (tx) => {
        // buildListQuery returns params including skip/limit, remove them for count
        const countParams = { ...params };
        delete countParams.skip;
        delete countParams.limit;
        logger.debug("Executing Task Count Query (using buildListQuery):", {
          ...reqContext_list,
          query: countQuery,
          params: countParams,
        });
        const result = await tx.run(countQuery, countParams);
        return result.records[0]?.get("total") ?? 0;
      });
      const total = totalResult;
      // Execute data query
      const dataResult = await session.executeRead(async (tx) => {
        logger.debug("Executing Task Data Query (using buildListQuery):", {
          ...reqContext_list,
          query: dataQuery,
          params: params,
        });
        const result = await tx.run(dataQuery, params);
        return result.records;
      });
      // Map records to plain objects. urls is persisted as a JSON string, so
      // it is deserialized here; tags defaults to an empty array when absent.
      const tasks = dataResult.map((record) => {
        // Construct the base Neo4jTask object
        const taskData: Neo4jTask = {
          id: record.get("id"),
          projectId: record.get("projectId"),
          title: record.get("title"),
          description: record.get("description"),
          priority: record.get("priority"),
          status: record.get("status"),
          urls: JSON.parse(record.get("urls") || "[]"), // Deserialize urls
          tags: record.get("tags") || [],
          completionRequirements: record.get("completionRequirements"),
          outputFormat: record.get("outputFormat"),
          taskType: record.get("taskType"),
          createdAt: record.get("createdAt"),
          updatedAt: record.get("updatedAt"),
        };
        // Get the assigned user ID from the record (null when unassigned)
        const assignedToUserId = record.get("assignedToUserId");
        // Combine base task data with the user ID
        return {
          ...taskData,
          assignedToUserId: assignedToUserId,
        };
      });
      // NOTE(review): limit is clamped to 1..100 here for totalPages math;
      // assumes buildListQuery applies the same clamp to the $limit it puts
      // in params — confirm, otherwise totalPages can disagree with the data.
      const page = Math.max(options.page || 1, 1);
      const limit = Math.min(Math.max(options.limit || 20, 1), 100);
      const totalPages = Math.ceil(total / limit);
      return {
        data: tasks,
        total,
        page,
        limit,
        totalPages,
      };
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "getTasks.error",
        filterOptions: options,
      });
      logger.error("Error getting tasks", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
/**
* Add a dependency relationship between tasks
* @param sourceTaskId ID of the dependent task (source)
* @param targetTaskId ID of the dependency task (target)
* @returns The IDs of the two tasks and the relationship ID
*/
static async addTaskDependency(
sourceTaskId: string,
targetTaskId: string,
): Promise<{ id: string; sourceTaskId: string; targetTaskId: string }> {
const session = await neo4jDriver.getSession();
try {
// Logic remains the same
const sourceExists = await Neo4jUtils.nodeExists(
NodeLabels.Task,
"id",
sourceTaskId,
);
const targetExists = await Neo4jUtils.nodeExists(
NodeLabels.Task,
"id",
targetTaskId,
);
if (!sourceExists)
throw new Error(`Source task with ID ${sourceTaskId} not found`);
if (!targetExists)
throw new Error(`Target task with ID ${targetTaskId} not found`);
const dependencyExists = await Neo4jUtils.relationshipExists(
NodeLabels.Task,
"id",
sourceTaskId,
NodeLabels.Task,
"id",
targetTaskId,
RelationshipTypes.DEPENDS_ON,
);
if (dependencyExists) {
throw new Error(
`Dependency relationship already exists between tasks ${sourceTaskId} and ${targetTaskId}`,
);
}
const circularDependencyQuery = `
MATCH path = (target:${NodeLabels.Task} {id: $targetTaskId})-[:${RelationshipTypes.DEPENDS_ON}*]->(source:${NodeLabels.Task} {id: $sourceTaskId})
RETURN count(path) > 0 AS hasCycle
`;
const cycleCheckResult = await session.executeRead(async (tx) => {
const result = await tx.run(circularDependencyQuery, {
sourceTaskId,
targetTaskId,
});
return result.records[0]?.get("hasCycle");
});
if (cycleCheckResult) {
throw new Error(
"Adding this dependency would create a circular dependency chain",
);
}
const dependencyId = `tdep_${generateId()}`;
const query = `
MATCH (source:${NodeLabels.Task} {id: $sourceTaskId}),
(target:${NodeLabels.Task} {id: $targetTaskId})
CREATE (source)-[r:${RelationshipTypes.DEPENDS_ON} {
id: $dependencyId,
createdAt: $createdAt
}]->(target)
RETURN r.id as id, source.id as sourceTaskId, target.id as targetTaskId
`;
const params = {
sourceTaskId,
targetTaskId,
dependencyId,
createdAt: Neo4jUtils.getCurrentTimestamp(),
};
const result = await session.executeWrite(async (tx) => {
const result = await tx.run(query, params);
return result.records;
});
if (!result || result.length === 0) {
throw new Error("Failed to create task dependency relationship");
}
const record = result[0];
const dependency = {
id: record.get("id"),
sourceTaskId: record.get("sourceTaskId"),
targetTaskId: record.get("targetTaskId"),
};
const reqContext_addDep = requestContextService.createRequestContext({
operation: "addTaskDependency",
sourceTaskId,
targetTaskId,
});
logger.info("Task dependency added successfully", reqContext_addDep);
return dependency;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "addTaskDependency.error",
sourceTaskId,
targetTaskId,
});
logger.error("Error adding task dependency", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Remove a dependency relationship between tasks
* @param dependencyId The ID of the dependency relationship to remove
* @returns True if removed, false if not found
*/
static async removeTaskDependency(dependencyId: string): Promise<boolean> {
const session = await neo4jDriver.getSession();
try {
const query = `
MATCH (source:${NodeLabels.Task})-[r:${RelationshipTypes.DEPENDS_ON} {id: $dependencyId}]->(target:${NodeLabels.Task})
DELETE r
`;
const result = await session.executeWrite(async (tx) => {
const res = await tx.run(query, { dependencyId });
return res.summary.counters.updates().relationshipsDeleted > 0;
});
const reqContext_removeDep = requestContextService.createRequestContext({
operation: "removeTaskDependency",
dependencyId,
});
if (result) {
logger.info(
"Task dependency removed successfully",
reqContext_removeDep,
);
} else {
logger.warning(
"Task dependency not found or not removed",
reqContext_removeDep,
);
}
return result;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "removeTaskDependency.error",
dependencyId,
});
logger.error("Error removing task dependency", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Get task dependencies (both dependencies and dependents)
* @param taskId Task ID
* @returns Object containing dependencies and dependents
*/
static async getTaskDependencies(taskId: string): Promise<{
dependencies: {
id: string; // Relationship ID
taskId: string; // Target Task ID
title: string;
status: string;
priority: string;
}[];
dependents: {
id: string; // Relationship ID
taskId: string; // Source Task ID
title: string;
status: string;
priority: string;
}[];
}> {
const session = await neo4jDriver.getSession();
try {
// Logic remains the same
const exists = await Neo4jUtils.nodeExists(NodeLabels.Task, "id", taskId);
if (!exists) {
throw new Error(`Task with ID ${taskId} not found`);
}
const dependenciesQuery = `
MATCH (source:${NodeLabels.Task} {id: $taskId})-[r:${RelationshipTypes.DEPENDS_ON}]->(target:${NodeLabels.Task})
RETURN r.id as id,
target.id AS taskId,
target.title AS title,
target.status AS status,
target.priority AS priority
ORDER BY target.priority DESC, target.title
`;
const dependentsQuery = `
MATCH (source:${NodeLabels.Task})-[r:${RelationshipTypes.DEPENDS_ON}]->(target:${NodeLabels.Task} {id: $taskId})
RETURN r.id as id,
source.id AS taskId,
source.title AS title,
source.status AS status,
source.priority AS priority
ORDER BY source.priority DESC, source.title
`;
const [dependenciesResult, dependentsResult] = await Promise.all([
session.executeRead(
async (tx) => (await tx.run(dependenciesQuery, { taskId })).records,
),
session.executeRead(
async (tx) => (await tx.run(dependentsQuery, { taskId })).records,
),
]);
const dependencies = dependenciesResult.map((record) => ({
id: record.get("id"),
taskId: record.get("taskId"),
title: record.get("title"),
status: record.get("status"),
priority: record.get("priority"),
}));
const dependents = dependentsResult.map((record) => ({
id: record.get("id"),
taskId: record.get("taskId"),
title: record.get("title"),
status: record.get("status"),
priority: record.get("priority"),
}));
return { dependencies, dependents };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "getTaskDependencies.error",
taskId,
});
logger.error("Error getting task dependencies", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
/**
* Assign a task to a user by creating an ASSIGNED_TO relationship.
* @param taskId Task ID
* @param userId User ID
* @returns The updated task (without assignedTo property)
*/
static async assignTask(taskId: string, userId: string): Promise<Neo4jTask> {
const session = await neo4jDriver.getSession();
try {
// Logic remains the same
const taskExists = await Neo4jUtils.nodeExists(
NodeLabels.Task,
"id",
taskId,
);
if (!taskExists) throw new Error(`Task with ID ${taskId} not found`);
const userExists = await Neo4jUtils.nodeExists(
NodeLabels.User,
"id",
userId,
);
if (!userExists) throw new Error(`User with ID ${userId} not found`);
const query = `
MATCH (t:${NodeLabels.Task} {id: $taskId}), (u:${NodeLabels.User} {id: $userId})
OPTIONAL MATCH (t)-[r:${RelationshipTypes.ASSIGNED_TO}]->(:${NodeLabels.User})
DELETE r
CREATE (t)-[:${RelationshipTypes.ASSIGNED_TO}]->(u)
SET t.updatedAt = $updatedAt
// Return properties defined in Neo4jTask
RETURN t.id as id,
t.projectId as projectId,
t.title as title,
t.description as description,
t.priority as priority,
t.status as status,
// assignedTo removed
t.urls as urls,
t.tags as tags,
t.completionRequirements as completionRequirements,
t.outputFormat as outputFormat,
t.taskType as taskType,
t.createdAt as createdAt,
t.updatedAt as updatedAt
`;
const params = {
taskId,
userId,
updatedAt: Neo4jUtils.getCurrentTimestamp(),
};
const result = await session.executeWrite(async (tx) => {
const result = await tx.run(query, params);
// Use .get() for each field
return result.records.length > 0 ? result.records[0] : null;
});
if (!result) {
throw new Error("Failed to assign task or retrieve its properties");
}
// Construct the Neo4jTask object - deserialize urls
const updatedTaskData: Neo4jTask = {
id: result.get("id"),
projectId: result.get("projectId"),
title: result.get("title"),
description: result.get("description"),
priority: result.get("priority"),
status: result.get("status"),
urls: JSON.parse(result.get("urls") || "[]"), // Deserialize urls
tags: result.get("tags") || [],
completionRequirements: result.get("completionRequirements"),
outputFormat: result.get("outputFormat"),
taskType: result.get("taskType"),
createdAt: result.get("createdAt"),
updatedAt: result.get("updatedAt"),
};
const reqContext_assign = requestContextService.createRequestContext({
operation: "assignTask",
taskId,
userId,
});
logger.info("Task assigned successfully", reqContext_assign);
return updatedTaskData;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
const errorContext = requestContextService.createRequestContext({
operation: "assignTask.error",
taskId,
userId,
});
logger.error("Error assigning task", error as Error, {
...errorContext,
detail: errorMessage,
});
throw error;
} finally {
await session.close();
}
}
  /**
   * Unassign a task by deleting the ASSIGNED_TO relationship.
   * Succeeds (and still bumps updatedAt) even when the task has no current
   * assignee, because the relationship match is OPTIONAL.
   * @param taskId Task ID
   * @returns The updated task (without assignedTo property)
   * @throws If the task does not exist or the write fails
   */
  static async unassignTask(taskId: string): Promise<Neo4jTask> {
    const session = await neo4jDriver.getSession();
    try {
      // Fail fast with a clear message if the task is missing.
      const taskExists = await Neo4jUtils.nodeExists(
        NodeLabels.Task,
        "id",
        taskId,
      );
      if (!taskExists) throw new Error(`Task with ID ${taskId} not found`);
      // OPTIONAL MATCH + DELETE is a no-op when no assignment exists, so the
      // query is safe to run on unassigned tasks.
      const query = `
        MATCH (t:${NodeLabels.Task} {id: $taskId})
        OPTIONAL MATCH (t)-[r:${RelationshipTypes.ASSIGNED_TO}]->(:${NodeLabels.User})
        DELETE r
        SET t.updatedAt = $updatedAt
        // Return properties defined in Neo4jTask
        RETURN t.id as id,
               t.projectId as projectId,
               t.title as title,
               t.description as description,
               t.priority as priority,
               t.status as status,
               // assignedTo removed
               t.urls as urls,
               t.tags as tags,
               t.completionRequirements as completionRequirements,
               t.outputFormat as outputFormat,
               t.taskType as taskType,
               t.createdAt as createdAt,
               t.updatedAt as updatedAt
      `;
      const params = {
        taskId,
        updatedAt: Neo4jUtils.getCurrentTimestamp(),
      };
      const result = await session.executeWrite(async (tx) => {
        const result = await tx.run(query, params);
        // A single record is expected; null signals the task vanished between
        // the existence check and this write.
        return result.records.length > 0 ? result.records[0] : null;
      });
      if (!result) {
        throw new Error("Failed to unassign task or retrieve its properties");
      }
      // Construct the Neo4jTask object; urls is stored as a JSON string and
      // deserialized here, tags defaults to an empty array when absent.
      const updatedTaskData: Neo4jTask = {
        id: result.get("id"),
        projectId: result.get("projectId"),
        title: result.get("title"),
        description: result.get("description"),
        priority: result.get("priority"),
        status: result.get("status"),
        urls: JSON.parse(result.get("urls") || "[]"), // Deserialize urls
        tags: result.get("tags") || [],
        completionRequirements: result.get("completionRequirements"),
        outputFormat: result.get("outputFormat"),
        taskType: result.get("taskType"),
        createdAt: result.get("createdAt"),
        updatedAt: result.get("updatedAt"),
      };
      const reqContext_unassign = requestContextService.createRequestContext({
        operation: "unassignTask",
        taskId,
      });
      logger.info("Task unassigned successfully", reqContext_unassign);
      return updatedTaskData;
    } catch (error) {
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorContext = requestContextService.createRequestContext({
        operation: "unassignTask.error",
        taskId,
      });
      logger.error("Error unassigning task", error as Error, {
        ...errorContext,
        detail: errorMessage,
      });
      throw error;
    } finally {
      await session.close();
    }
  }
}
```