This is page 5 of 7. Use http://codebase.md/chillbruhhh/crawl4ai-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .env.example
├── .gitattributes
├── .gitignore
├── crawled_pages.sql
├── Dockerfile
├── knowledge_graphs
│ ├── ai_hallucination_detector.py
│ ├── ai_script_analyzer.py
│ ├── hallucination_reporter.py
│ ├── knowledge_graph_validator.py
│ ├── parse_repo_into_neo4j.py
│ ├── query_knowledge_graph.py
│ └── test_script.py
├── LICENSE
├── neo4j
│ └── docker-neo4j
│ ├── .github
│ │ └── ISSUE_TEMPLATE
│ │ └── bug_report.md
│ ├── .gitignore
│ ├── build-docker-image.sh
│ ├── build-utils-common-functions.sh
│ ├── COPYRIGHT
│ ├── DEVELOPMENT.md
│ ├── devenv
│ ├── devenv.local.template
│ ├── docker-image-src
│ │ ├── 2.3
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 3.0
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 3.1
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 3.2
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 3.3
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 3.4
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 3.5
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ ├── Dockerfile
│ │ │ │ └── neo4j-plugins.json
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 4.0
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ └── Dockerfile
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 4.1
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ └── Dockerfile
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 4.2
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ ├── Dockerfile
│ │ │ │ └── neo4j-plugins.json
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 4.3
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ ├── Dockerfile
│ │ │ │ └── neo4j-plugins.json
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ └── Dockerfile
│ │ ├── 4.4
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ ├── Dockerfile-debian
│ │ │ │ ├── Dockerfile-ubi9
│ │ │ │ ├── neo4j-admin-report.sh
│ │ │ │ └── neo4j-plugins.json
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ ├── Dockerfile-debian
│ │ │ └── Dockerfile-ubi9
│ │ ├── 5
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ ├── Dockerfile-debian
│ │ │ │ ├── Dockerfile-ubi8
│ │ │ │ ├── Dockerfile-ubi9
│ │ │ │ ├── neo4j-admin-report.sh
│ │ │ │ └── neo4j-plugins.json
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ ├── Dockerfile-debian
│ │ │ ├── Dockerfile-ubi8
│ │ │ └── Dockerfile-ubi9
│ │ ├── calver
│ │ │ ├── coredb
│ │ │ │ ├── docker-entrypoint.sh
│ │ │ │ ├── Dockerfile-debian
│ │ │ │ ├── Dockerfile-ubi9
│ │ │ │ ├── neo4j-admin-report.sh
│ │ │ │ └── neo4j-plugins.json
│ │ │ └── neo4j-admin
│ │ │ ├── docker-entrypoint.sh
│ │ │ ├── Dockerfile-debian
│ │ │ └── Dockerfile-ubi9
│ │ └── common
│ │ ├── semver.jq
│ │ └── utilities.sh
│ ├── generate-stub-plugin
│ │ ├── build.gradle.kts
│ │ ├── Dockerfile
│ │ ├── ExampleNeo4jPlugin.java
│ │ ├── Makefile
│ │ ├── README.md
│ │ └── settings.gradle.kts
│ ├── LICENSE
│ ├── Makefile
│ ├── pom.xml
│ ├── publish-neo4j-admin-image.sh
│ ├── publish-neo4j-admin-images.sh
│ ├── README.md
│ └── src
│ ├── main
│ │ └── resources
│ │ └── log4j.properties
│ └── test
│ ├── java
│ │ └── com
│ │ └── neo4j
│ │ └── docker
│ │ ├── coredb
│ │ │ ├── configurations
│ │ │ │ ├── Configuration.java
│ │ │ │ ├── Setting.java
│ │ │ │ ├── TestConfSettings.java
│ │ │ │ ├── TestExtendedConf.java
│ │ │ │ └── TestJVMAdditionalConfig.java
│ │ │ ├── plugins
│ │ │ │ ├── Neo4jPluginEnv.java
│ │ │ │ ├── StubPluginHelper.java
│ │ │ │ ├── TestBundledPluginInstallation.java
│ │ │ │ ├── TestPluginInstallation.java
│ │ │ │ └── TestSemVerPluginMatching.java
│ │ │ ├── TestAdminReport.java
│ │ │ ├── TestAuthentication.java
│ │ │ ├── TestBasic.java
│ │ │ ├── TestCausalCluster.java
│ │ │ ├── TestMounting.java
│ │ │ └── TestUpgrade.java
│ │ ├── neo4jadmin
│ │ │ ├── TestAdminBasic.java
│ │ │ ├── TestBackupRestore.java
│ │ │ ├── TestBackupRestore44.java
│ │ │ ├── TestDumpLoad.java
│ │ │ ├── TestDumpLoad44.java
│ │ │ └── TestReport.java
│ │ ├── TestDeprecationWarning.java
│ │ ├── TestDockerComposeSecrets.java
│ │ └── utils
│ │ ├── DatabaseIO.java
│ │ ├── HostFileHttpHandler.java
│ │ ├── HttpServerTestExtension.java
│ │ ├── Neo4jVersion.java
│ │ ├── Neo4jVersionTest.java
│ │ ├── Network.java
│ │ ├── SetContainerUser.java
│ │ ├── TemporaryFolderManager.java
│ │ ├── TemporaryFolderManagerTest.java
│ │ ├── TestSettings.java
│ │ └── WaitStrategies.java
│ └── resources
│ ├── causal-cluster-compose.yml
│ ├── confs
│ │ ├── before50
│ │ │ ├── ConfsNotOverridden.conf
│ │ │ ├── ConfsReplaced.conf
│ │ │ ├── EnterpriseOnlyNotOverwritten.conf
│ │ │ ├── EnvVarsOverride.conf
│ │ │ ├── ExtendedConf.conf
│ │ │ ├── InvalidExtendedConf.conf
│ │ │ ├── JvmAdditionalNotOverridden.conf
│ │ │ ├── NoNewline.conf
│ │ │ └── ReadConf.conf
│ │ ├── ConfsNotOverridden.conf
│ │ ├── ConfsReplaced.conf
│ │ ├── EnterpriseOnlyNotOverwritten.conf
│ │ ├── EnvVarsOverride.conf
│ │ ├── ExtendedConf.conf
│ │ ├── InvalidExtendedConf.conf
│ │ ├── JvmAdditionalNotOverridden.conf
│ │ ├── NoNewline.conf
│ │ └── ReadConf.conf
│ ├── dockersecrets
│ │ ├── container-compose-with-incorrect-secrets.yml
│ │ ├── container-compose-with-secrets-override.yml
│ │ ├── container-compose-with-secrets.yml
│ │ ├── simple-container-compose-with-external-file-var.yml
│ │ └── simple-container-compose.yml
│ ├── ha-cluster-compose.yml
│ └── stubplugin
│ └── myPlugin.jar
├── pyproject.toml
├── README.md
├── src
│ ├── crawl4ai_mcp.py
│ └── utils.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/neo4j/docker-neo4j/docker-image-src/4.3/coredb/docker-entrypoint.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash -eu

# First positional argument is the command the container was asked to run
# (e.g. "neo4j", "dump-config"); any remaining arguments are passed through
# verbatim at the bottom of this script.
cmd="$1"
# True when the script's effective user is root (uid 0).
function running_as_root
{
    [[ "$(id -u)" == "0" ]]
}
9 |
# True when SECURE_FILE_PERMISSIONS=yes. As a side effect, defaults the
# variable to "no" when it is unset (the := assignment), matching how the
# rest of the script reads it.
function secure_mode_enabled
{
    [[ "${SECURE_FILE_PERMISSIONS:=no}" == "yes" ]]
}
14 |
# containsElement NEEDLE ITEM...
# Succeed (return 0) iff NEEDLE is string-equal to one of the ITEMs.
function containsElement
{
    local needle="$1"
    shift
    local candidate
    for candidate in "$@"; do
        if [[ "${candidate}" == "${needle}" ]]; then
            return 0
        fi
    done
    return 1
}
22 |
# is_readable FILE — succeed iff FILE is readable by the effective user,
# judged from its octal permission bits and ownership rather than a plain
# test -r, so it gives the same answer no matter who runs this script.
# Reads the script-level globals ${userid} and ${groups[@]}.
# Note: ${perm} is intentionally not declared local (matches is_writable).
function is_readable
{
    local _target=${1}
    perm=$(stat -c %a "${_target}")

    # world-readable?
    if [[ ${perm:2:1} -ge 4 ]]; then
        return 0
    fi
    # readable by owner, and we are the owner (matched by name or by uid)?
    if [[ ${perm:0:1} -ge 4 ]]; then
        if [[ "$(stat -c %U "${_target}")" = "${userid}" ]] || [[ "$(stat -c %u "${_target}")" = "${userid}" ]]; then
            return 0
        fi
    fi
    # readable by group, and one of our groups matches (by gid or by name)?
    if [[ ${perm:1:1} -ge 4 ]]; then
        if containsElement "$(stat -c %g "${_target}")" "${groups[@]}" || containsElement "$(stat -c %G "${_target}")" "${groups[@]}"; then
            return 0
        fi
    fi
    return 1
}
48 |
# is_writable FILE — succeed iff FILE is writable by the effective user,
# judged from its octal permission bits and ownership (a digit of 2, 3, 6
# or 7 carries the write bit). Reads the script-level globals ${userid}
# and ${groups[@]}. Like is_readable, ${perm} is deliberately not local.
function is_writable
{
    local _target=${1}
    perm=$(stat -c %a "${_target}")

    # world-writable?
    if containsElement ${perm:2:1} 2 3 6 7; then
        return 0
    fi
    # writable by owner, and we are the owner (matched by name or by uid)?
    if containsElement ${perm:0:1} 2 3 6 7; then
        if [[ "$(stat -c %U "${_target}")" = "${userid}" ]] || [[ "$(stat -c %u "${_target}")" = "${userid}" ]]; then
            return 0
        fi
    fi
    # writable by group, and one of our groups matches (by gid or by name)?
    if containsElement ${perm:1:1} 2 3 6 7; then
        if containsElement "$(stat -c %g "${_target}")" "${groups[@]}" || containsElement "$(stat -c %G "${_target}")" "${groups[@]}"; then
            return 0
        fi
    fi
    return 1
}
73 |
# Print the "--expand-commands" flag when EXTENDED_CONF is set (to any
# value, including the empty string); print nothing otherwise.
function expand_commands_optionally
{
    case "${EXTENDED_CONF+"yes"}" in
        yes) echo "--expand-commands" ;;
    esac
}
80 |
# print_permissions_advice_and_fail DIRECTORY
# Write a human-readable explanation of mounted-folder permission problems
# to stderr, then abort startup with exit code 1.
# Reads the script-level globals ${userid}, ${groupid} and ${groups[@]}.
function print_permissions_advice_and_fail
{
    _directory=${1}
    echo >&2 "
Folder ${_directory} is not accessible for user: ${userid} or group ${groupid} or groups ${groups[@]}, this is commonly a file permissions issue on the mounted folder.

Hints to solve the issue:
1) Make sure the folder exists before mounting it. Docker will create the folder using root permissions before starting the Neo4j container. The root permissions disallow Neo4j from writing to the mounted folder.
2) Pass the folder owner's user ID and group ID to docker run, so that docker runs as that user.
If the folder is owned by the current user, this can be done by adding this flag to your docker run command:
  --user=\$(id -u):\$(id -g)
"
    exit 1
}
95 |
# check_mounted_folder_readable DIRECTORY
# Abort (with advice on stderr) when DIRECTORY is not readable by the
# effective user.
function check_mounted_folder_readable
{
    local _dir=${1}
    is_readable "${_dir}" || print_permissions_advice_and_fail "${_dir}"
}
103 |
# check_mounted_folder_writable_with_chown MOUNT_FOLDER
# Ensure a user-mounted folder (e.g. /data, /logs) is writable; as root
# (and not in secure mode) we repair ownership with chown, otherwise we
# can only verify and fail with advice.
function check_mounted_folder_writable_with_chown
{
    # The /data and /log directory are a bit different because they are very likely to be mounted by the user but not
    # necessarily writable.
    # This depends on whether a user ID is passed to the container and which folders are mounted.
    #
    # No user ID passed to container:
    # 1) No folders are mounted.
    #    The /data and /log folder are owned by neo4j by default, so should be writable already.
    # 2) Both /log and /data are mounted.
    #    This means on start up, /data and /logs are owned by an unknown user and we should chown them to neo4j for
    #    backwards compatibility.
    #
    # User ID passed to container:
    # 1) Both /data and /logs are mounted
    #    The /data and /logs folders are owned by an unknown user but we *should* have rw permission to them.
    #    That should be verified and error (helpfully) if not.
    # 2) User mounts /data or /logs *but not both*
    #    The unmounted folder is still owned by neo4j, which should already be writable. The mounted folder should
    #    have rw permissions through user id. This should be verified.
    # 3) No folders are mounted.
    #    The /data and /log folder are owned by neo4j by default, and these are already writable by the user.
    #    (This is a very unlikely use case).

    local mountFolder=${1}
    if running_as_root && ! secure_mode_enabled; then
        # check folder permissions
        if ! is_writable "${mountFolder}" ; then
            # warn that we're about to chown the folder and then chown it
            echo "Warning: Folder mounted to \"${mountFolder}\" is not writable from inside container. Changing folder owner to ${userid}."
            chown -R "${userid}":"${groupid}" "${mountFolder}"
        # check permissions on files in the folder (gosu runs find as the target user)
        elif [ $(gosu "${userid}":"${groupid}" find "${mountFolder}" -not -writable | wc -l) -gt 0 ]; then
            echo "Warning: Some files inside \"${mountFolder}\" are not writable from inside container. Changing folder owner to ${userid}."
            chown -R "${userid}":"${groupid}" "${mountFolder}"
        fi
    else
        # Not root (or secure mode): we cannot repair ownership, only verify.
        if ! is_writable "${mountFolder}"; then
            #if [[ ! -w "${mountFolder}" ]] && [[ "$(stat -c %U ${mountFolder})" != "neo4j" ]]; then
            echo >&2 "Consider unsetting SECURE_FILE_PERMISSIONS environment variable, to enable docker to write to ${mountFolder}."
            print_permissions_advice_and_fail "${mountFolder}"
        fi
    fi
}
148 |
# load_plugin_from_location PLUGIN_NAME LOCATION_GLOB
# Install a plugin that is shipped inside the image: copy the jar(s)
# matching LOCATION_GLOB into the active plugins directory under the
# canonical name "<plugin>.jar", then verify the result is readable.
function load_plugin_from_location
{
    # Install a plugin from location at runtime.
    local _plugin_name="${1}"
    local _location="${2}"

    # Prefer a user-mounted /plugins folder over the one inside NEO4J_HOME.
    local _plugins_dir="${NEO4J_HOME}/plugins"
    if [ -d /plugins ]; then
        _plugins_dir="/plugins"
    fi

    local _destination="${_plugins_dir}/${_plugin_name}.jar"

    # Now we install the plugin that is shipped with Neo4j.
    # ${_location} is intentionally unquoted so the glob expands.
    # BUG FIX: the copy source was the garbage token "$(unknown)" — it must
    # be the loop variable ${filename}, otherwise nothing is ever copied.
    for filename in ${_location}; do
        echo "Installing Plugin '${_plugin_name}' from ${_location} to ${_destination}"
        cp --preserve "${filename}" "${_destination}"
    done

    if ! is_readable "${_destination}"; then
        echo >&2 "Plugin at '${_destination}' is not readable"
        exit 1
    fi
}
173 |
# load_plugin_from_github PLUGIN_NAME
# Load a plugin at runtime. The provided github repository must have a
# versions.json on the master branch with the correct format; the entry for
# this plugin in /startup/neo4j-plugins.json points at that versions.json.
function load_plugin_from_github
{
    local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graph-ql

    # Prefer a user-mounted /plugins folder over the one inside NEO4J_HOME.
    local _plugins_dir="${NEO4J_HOME}/plugins"
    if [ -d /plugins ]; then
        local _plugins_dir="/plugins"
    fi
    local _versions_json_url="$(jq --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.versions" /startup/neo4j-plugins.json )"
    # Using the same name for the plugin irrespective of version ensures we don't end up with different versions of the same plugin
    local _destination="${_plugins_dir}/${_plugin_name}.jar"
    local _neo4j_version="$(neo4j --version | cut -d' ' -f2)"

    # Now we call out to github to get the versions.json for this plugin and we parse that to find the url for the correct plugin jar for our neo4j version
    # (the semver matching is done by the jq module at /startup/semver.jq, loaded via -L/startup).
    echo "Fetching versions.json for Plugin '${_plugin_name}' from ${_versions_json_url}"
    local _versions_json="$(wget -q --timeout 300 --tries 30 -O - "${_versions_json_url}")"
    local _plugin_jar_url="$(echo "${_versions_json}" | jq -L/startup --raw-output "import \"semver\" as lib; [ .[] | select(.neo4j|lib::semver(\"${_neo4j_version}\")) ] | min_by(.neo4j) | .jar")"
    if [[ -z "${_plugin_jar_url}" ]]; then
        echo >&2 "Error: No jar URL found for version '${_neo4j_version}' in versions.json from '${_versions_json_url}'"
        exit 1
    fi
    echo "Installing Plugin '${_plugin_name}' from ${_plugin_jar_url} to ${_destination} "
    wget -q --timeout 300 --tries 30 --output-document="${_destination}" "${_plugin_jar_url}"

    if ! is_readable "${_destination}"; then
        echo >&2 "Plugin at '${_destination}' is not readable"
        exit 1
    fi
}
205 |
# apply_plugin_default_configuration PLUGIN_NAME REFERENCE_CONF
# Merge the plugin's default properties (from /startup/neo4j-plugins.json)
# into neo4j.conf. A property the user already set in REFERENCE_CONF is left
# alone; a property already present in neo4j.conf gets the plugin's value
# inserted at the front of its comma-separated list; otherwise the property
# is appended to the file.
function apply_plugin_default_configuration
{
    local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graph-ql
    local _reference_conf="${2}" # used to determine if we can override properties
    local _neo4j_conf="${NEO4J_HOME}/conf/neo4j.conf"

    local _property _value
    echo "Applying default values for plugin ${_plugin_name} to neo4j.conf"
    for _entry in $(jq --compact-output --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.properties | to_entries[]" /startup/neo4j-plugins.json); do
        _property="$(jq --raw-output '.key' <<< "${_entry}")"
        _value="$(jq --raw-output '.value' <<< "${_entry}")"

        # the first grep strips out comments
        if grep -o "^[^#]*" "${_reference_conf}" | grep -q --fixed-strings "${_property}=" ; then
            # property is already set in the user provided config. In this case we don't override what has been set explicitly by the user.
            echo "Skipping ${_property} for plugin ${_plugin_name} because it is already set"
        else
            if grep -o "^[^#]*" "${_neo4j_conf}" | grep -q --fixed-strings "${_property}=" ; then
                # Prepend the plugin value to the existing setting's value list.
                sed --in-place "s/${_property}=/&${_value},/" "${_neo4j_conf}"
            else
                echo "${_property}=${_value}" >> "${_neo4j_conf}"
            fi
        fi
    done
}
233 |
# Install every plugin named in the NEO4JLABS_PLUGINS json array (either
# from a jar shipped in the image or by downloading from github) and merge
# each plugin's default settings into neo4j.conf.
function install_neo4j_labs_plugins
{
    # We store a copy of the config before we modify it for the plugins to allow us to see if there are user-set values in the input config that we shouldn't override
    local _old_config="$(mktemp)"
    cp "${NEO4J_HOME}"/conf/neo4j.conf "${_old_config}"
    for plugin_name in $(echo "${NEO4JLABS_PLUGINS}" | jq --raw-output '.[]'); do
        local _location="$(jq --raw-output "with_entries( select(.key==\"${plugin_name}\") ) | to_entries[] | .value.location" /startup/neo4j-plugins.json )"
        # Use the local jar when the plugin declares a location whose glob
        # actually matches a file; otherwise fall back to github download.
        if [ "${_location}" != "null" -a -n "$(shopt -s nullglob; echo ${_location})" ]; then
            load_plugin_from_location "${plugin_name}" "${_location}"
        else
            load_plugin_from_github "${plugin_name}"
        fi
        apply_plugin_default_configuration "${plugin_name}" "${_old_config}"
    done
    rm "${_old_config}"
}
250 |
# add_docker_default_to_conf SETTING VALUE NEO4J_HOME
# Append "SETTING=VALUE" to neo4j.conf unless SETTING is already present.
# Docker defaults must NOT overwrite values already in the conf file.
function add_docker_default_to_conf
{
    local _setting="${1}"
    local _value="${2}"
    local _neo4j_home="${3}"

    if ! grep -q "^${_setting}=" "${_neo4j_home}"/conf/neo4j.conf
    then
        # Quoted printf instead of the previous unquoted 'echo -e', so values
        # containing whitespace runs or glob characters are written verbatim.
        printf '\n%s=%s\n' "${_setting}" "${_value}" >> "${_neo4j_home}"/conf/neo4j.conf
    fi
}
263 |
# add_env_setting_to_conf SETTING VALUE NEO4J_HOME
# Write "SETTING=VALUE" into neo4j.conf, first deleting any existing line
# for the same setting: environment-derived settings always win over
# whatever is already in the conf file.
function add_env_setting_to_conf
{
    local _setting=${1}
    local _value=${2}
    local _neo4j_home=${3}
    local _conf="${_neo4j_home}"/conf/neo4j.conf

    # Drop every pre-existing line for this setting before appending.
    if grep -q -F "${_setting}=" "${_conf}"; then
        sed --in-place "/^${_setting}=.*/d" "${_conf}"
    fi
    echo "${_setting}=${_value}" >> "${_conf}"
}
278 |
# set_initial_password NEO4J_AUTH_VALUE
# Interpret the NEO4J_AUTH value when the container runs the server:
#   "none"                    -> disable authentication entirely
#   "neo4j/<password>[/true]" -> set the initial admin password; the optional
#                                trailing "true" forces a change on first login
#   any other non-empty value -> fatal configuration error
function set_initial_password
{
    local _neo4j_auth="${1}"

    # set the neo4j initial password only if you run the database server
    if [ "${cmd}" == "neo4j" ]; then
        if [ "${_neo4j_auth:-}" == "none" ]; then
            add_env_setting_to_conf "dbms.security.auth_enabled" "false" "${NEO4J_HOME}"
            # NEO4J_dbms_security_auth__enabled=false
        elif [[ "${_neo4j_auth:-}" =~ ^([^/]+)\/([^/]+)/?([tT][rR][uU][eE])?$ ]]; then
            admin_user="${BASH_REMATCH[1]}"
            password="${BASH_REMATCH[2]}"
            do_reset="${BASH_REMATCH[3]}"

            # Only the neo4j admin account may be initialised, and the default
            # password may not be reused.
            if [ "${password}" == "neo4j" ]; then
                echo >&2 "Invalid value for password. It cannot be 'neo4j', which is the default."
                exit 1
            fi
            if [ "${admin_user}" != "neo4j" ]; then
                echo >&2 "Invalid admin username, it must be neo4j"
                exit 1
            fi

            if running_as_root; then
                # running set-initial-password as root will create subfolders to /data as root, causing startup fail when neo4j can't read or write the /data/dbms folder
                # creating the folder first will avoid that
                mkdir -p /data/dbms
                chown "${userid}":"${groupid}" /data/dbms
            fi

            # Will exit with error if users already exist (and print a message explaining that)
            # we probably don't want the message though, since it throws an error message on restarting the container.
            # NOTE(review): the regex accepts "True"/"TRUE" etc, but this exact
            # lowercase comparison then skips --require-password-change for
            # them — presumably unintended upstream; confirm before changing.
            if [ "${do_reset}" == "true" ]; then
                ${neo4j_admin_cmd} set-initial-password "${password}" --require-password-change $(expand_commands_optionally) 2>/dev/null || true
            else
                ${neo4j_admin_cmd} set-initial-password "${password}" $(expand_commands_optionally) 2>/dev/null || true
            fi
        elif [ -n "${_neo4j_auth:-}" ]; then
            echo "$_neo4j_auth is invalid"
            echo >&2 "Invalid value for NEO4J_AUTH: '${_neo4j_auth}'"
            exit 1
        fi
    fi
}
323 |
# If we're running as root, then run as the neo4j user. Otherwise
# docker is running with --user and we simply use that user. Note
# that su-exec, despite its name, does not replicate the functionality
# of exec, so we need to use both
if running_as_root; then
    userid="neo4j"
    groupid="neo4j"
    groups=($(id -G neo4j))
    exec_cmd="exec gosu neo4j:neo4j"
    neo4j_admin_cmd="gosu neo4j:neo4j neo4j-admin"
else
    userid="$(id -u)"
    groupid="$(id -g)"
    groups=($(id -G))
    exec_cmd="exec"
    neo4j_admin_cmd="neo4j-admin"
fi
# These globals are read by the permission-checking helpers defined above.
readonly userid
readonly groupid
readonly groups
readonly exec_cmd
readonly neo4j_admin_cmd
347 |
# Need to chown the home directory
# Lock the installation down so only the neo4j user can access it:
# home and its top-level dirs become 700, config files become 600.
if running_as_root; then
    chown -R "${userid}":"${groupid}" "${NEO4J_HOME}"
    chmod 700 "${NEO4J_HOME}"
    find "${NEO4J_HOME}" -mindepth 1 -maxdepth 1 -type d -exec chmod -R 700 {} \;
    find "${NEO4J_HOME}"/conf -type f -exec chmod -R 600 {} \;
fi
355 |
# ==== CHECK LICENSE AGREEMENT ====

# Only prompt for license agreement if command contains "neo4j" in it
# Enterprise images refuse to start until NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
# (the := also defaults the variable to "no" when unset).
if [[ "${cmd}" == *"neo4j"* ]]; then
    if [ "${NEO4J_EDITION}" == "enterprise" ]; then
        if [ "${NEO4J_ACCEPT_LICENSE_AGREEMENT:=no}" != "yes" ]; then
            echo >&2 "
In order to use Neo4j Enterprise Edition you must accept the license agreement.

(c) Neo4j Sweden AB. 2022. All Rights Reserved.
Use of this Software without a proper commercial license with Neo4j,
Inc. or its affiliates is prohibited.

Email inquiries can be directed to: [email protected]

More information is also available at: https://neo4j.com/licensing/


To accept the license agreement set the environment variable
NEO4J_ACCEPT_LICENSE_AGREEMENT=yes

To do this you can use the following docker argument:

        --env=NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
"
            exit 1
        fi
    fi
fi
385 |
# ==== RENAME LEGACY ENVIRONMENT CONF VARIABLES ====

# Env variable naming convention:
# - prefix NEO4J_
# - double underscore char '__' instead of single underscore '_' char in the setting name
# - underscore char '_' instead of dot '.' char in the setting name
# Example:
# NEO4J_dbms_tx__log_rotation_retention__policy env variable to set
# dbms.tx_log.rotation.retention_policy setting

# Backward compatibility - map old hardcoded env variables into new naming convention (if they aren't set already)
# Set some to default values if unset
: ${NEO4J_dbms_tx__log_rotation_retention__policy:=${NEO4J_dbms_txLog_rotation_retentionPolicy:-}}
: ${NEO4J_dbms_unmanaged__extension__classes:=${NEO4J_dbms_unmanagedExtensionClasses:-}}
: ${NEO4J_dbms_allow__format__migration:=${NEO4J_dbms_allowFormatMigration:-}}
: ${NEO4J_dbms_connectors_default__advertised__address:=${NEO4J_dbms_connectors_defaultAdvertisedAddress:-}}

# Enterprise-only legacy causal-clustering variables.
if [ "${NEO4J_EDITION}" == "enterprise" ];
then
    : ${NEO4J_causal__clustering_expected__core__cluster__size:=${NEO4J_causalClustering_expectedCoreClusterSize:-}}
    : ${NEO4J_causal__clustering_initial__discovery__members:=${NEO4J_causalClustering_initialDiscoveryMembers:-}}
    : ${NEO4J_causal__clustering_discovery__advertised__address:=${NEO4J_causalClustering_discoveryAdvertisedAddress:-}}
    : ${NEO4J_causal__clustering_transaction__advertised__address:=${NEO4J_causalClustering_transactionAdvertisedAddress:-}}
    : ${NEO4J_causal__clustering_raft__advertised__address:=${NEO4J_causalClustering_raftAdvertisedAddress:-}}
fi

# unset old hardcoded unsupported env variables
unset NEO4J_dbms_txLog_rotation_retentionPolicy NEO4J_UDC_SOURCE \
    NEO4J_dbms_unmanagedExtensionClasses NEO4J_dbms_allowFormatMigration \
    NEO4J_dbms_connectors_defaultAdvertisedAddress NEO4J_ha_serverId \
    NEO4J_ha_initialHosts NEO4J_causalClustering_expectedCoreClusterSize \
    NEO4J_causalClustering_initialDiscoveryMembers \
    NEO4J_causalClustering_discoveryListenAddress \
    NEO4J_causalClustering_discoveryAdvertisedAddress \
    NEO4J_causalClustering_transactionListenAddress \
    NEO4J_causalClustering_transactionAdvertisedAddress \
    NEO4J_causalClustering_raftListenAddress \
    NEO4J_causalClustering_raftAdvertisedAddress
# ==== CHECK FILE PERMISSIONS ON MOUNTED FOLDERS ====


# /conf: replace the shipped configuration with the mounted files.
if [ -d /conf ]; then
    check_mounted_folder_readable "/conf"
    rm -rf "${NEO4J_HOME}"/conf/*
    find /conf -type f -exec cp --preserve=ownership,mode {} "${NEO4J_HOME}"/conf \;
fi

# /ssl: point the certificates directory at the mount.
if [ -d /ssl ]; then
    check_mounted_folder_readable "/ssl"
    rm -rf "${NEO4J_HOME}"/certificates
    ln -s /ssl "${NEO4J_HOME}"/certificates
fi

# /plugins: must be writable only when plugins will be downloaded into it.
if [ -d /plugins ]; then
    if [[ -n "${NEO4JLABS_PLUGINS:-}" ]]; then
        # We need write permissions
        check_mounted_folder_writable_with_chown "/plugins"
    fi
    check_mounted_folder_readable "/plugins"
    : ${NEO4J_dbms_directories_plugins:="/plugins"}
fi

if [ -d /import ]; then
    check_mounted_folder_readable "/import"
    : ${NEO4J_dbms_directories_import:="/import"}
fi

if [ -d /metrics ]; then
    # metrics is enterprise only
    if [ "${NEO4J_EDITION}" == "enterprise" ];
    then
        check_mounted_folder_writable_with_chown "/metrics"
        : ${NEO4J_dbms_directories_metrics:="/metrics"}
    fi
fi

if [ -d /logs ]; then
    check_mounted_folder_writable_with_chown "/logs"
    : ${NEO4J_dbms_directories_logs:="/logs"}
fi

# /data and its well-known subfolders must all be writable.
if [ -d /data ]; then
    check_mounted_folder_writable_with_chown "/data"
    if [ -d /data/databases ]; then
        check_mounted_folder_writable_with_chown "/data/databases"
    fi
    if [ -d /data/dbms ]; then
        check_mounted_folder_writable_with_chown "/data/dbms"
    fi
    if [ -d /data/transactions ]; then
        check_mounted_folder_writable_with_chown "/data/transactions"
    fi
fi

if [ -d /licenses ]; then
    check_mounted_folder_readable "/licenses"
    : ${NEO4J_dbms_directories_licenses:="/licenses"}
fi
485 |
# ==== SET CONFIGURATIONS ====

## == DOCKER SPECIFIC DEFAULT CONFIGURATIONS ===
## these should not override *any* configurations set by the user

add_docker_default_to_conf "dbms.tx_log.rotation.retention_policy" "100M size" "${NEO4J_HOME}"
add_docker_default_to_conf "dbms.memory.pagecache.size" "512M" "${NEO4J_HOME}"
add_docker_default_to_conf "dbms.default_listen_address" "0.0.0.0" "${NEO4J_HOME}"
# set enterprise only docker defaults
if [ "${NEO4J_EDITION}" == "enterprise" ];
then
    # Cluster advertised addresses default to this container's hostname.
    add_docker_default_to_conf "causal_clustering.discovery_advertised_address" "$(hostname):5000" "${NEO4J_HOME}"
    add_docker_default_to_conf "causal_clustering.transaction_advertised_address" "$(hostname):6000" "${NEO4J_HOME}"
    add_docker_default_to_conf "causal_clustering.raft_advertised_address" "$(hostname):7000" "${NEO4J_HOME}"
fi
501 |
## == ENVIRONMENT VARIABLE CONFIGURATIONS ===
## these override BOTH defaults and any existing values in the neo4j.conf file

# save NEO4J_HOME and NEO4J_AUTH to temp variables that don't begin with NEO4J_ so they don't get added to the conf
temp_neo4j_home="${NEO4J_HOME}"
temp_neo4j_auth="${NEO4J_AUTH:-}"
# list env variables with prefix NEO4J_ and create settings from them
unset NEO4J_AUTH NEO4J_SHA256 NEO4J_TARBALL NEO4J_EDITION NEO4J_ACCEPT_LICENSE_AGREEMENT NEO4J_HOME
# NOTE(review): 'sort -rn' numerically reverse-sorts the variable *names* —
# presumably only to get a stable iteration order; confirm before changing.
for i in $( set | grep ^NEO4J_ | awk -F'=' '{print $1}' | sort -rn ); do
    # NEO4J_foo__bar_baz -> foo_bar.baz  ('_' -> '.', then '..' -> '_')
    setting=$(echo "${i}" | sed 's|^NEO4J_||' | sed 's|_|.|g' | sed 's|\.\.|_|g')
    value=$(echo "${!i}")
    # Don't allow settings with no value or settings that start with a number (neo4j converts settings to env variables and you cannot have an env variable that starts with a number)
    if [[ -n ${value} ]]; then
        if [[ ! "${setting}" =~ ^[0-9]+.*$ ]]; then
            add_env_setting_to_conf "${setting}" "${value}" "${temp_neo4j_home}"
        else
            echo >&2 "WARNING: ${setting} not written to conf file because settings that start with a number are not permitted"
        fi
    fi
done
export NEO4J_HOME="${temp_neo4j_home}"
unset temp_neo4j_home
524 |
# ==== SET PASSWORD AND PLUGINS ====

# NEO4J_AUTH was stashed in temp_neo4j_auth (and unset) before the
# env-to-conf loop above so it never leaks into neo4j.conf.
set_initial_password "${temp_neo4j_auth}"


if [[ ! -z "${NEO4JLABS_PLUGINS:-}" ]]; then
    # NEO4JLABS_PLUGINS should be a json array of plugins like '["graph-algorithms", "apoc", "streams", "graphql"]'
    install_neo4j_labs_plugins
fi
534 |
# ==== INVOKE NEO4J STARTUP ====

# Source an optional user-supplied extension script before starting.
# NOTE(review): ${EXTENSION_SCRIPT} is unquoted when sourced, so a path
# containing spaces would break — confirm before changing upstream behaviour.
[ -f "${EXTENSION_SCRIPT:-}" ] && . ${EXTENSION_SCRIPT}

# "dump-config" copies the effective configuration out to the /conf mount
# and exits without starting the server.
if [ "${cmd}" == "dump-config" ]; then
    if [ ! -d "/conf" ]; then
        echo >&2 "You must mount a folder to /conf so that the configuration file(s) can be dumped to there."
        exit 1
    fi
    check_mounted_folder_writable_with_chown "/conf"
    cp --recursive "${NEO4J_HOME}"/conf/* /conf
    echo "Config Dumped"
    exit 0
fi
549 |
# this prints out a command for us to run.
# the command is something like: `java ...[lots of java options]... neo4j.mainClass ...[some neo4j options]...`
function get_neo4j_run_cmd {

    local extraArgs=()

    # EXTENDED_CONF set (even to the empty string) enables command expansion.
    if [ "${EXTENDED_CONF+"yes"}" == "yes" ]; then
        extraArgs+=("--expand-commands")
    fi

    # 'neo4j console --dry-run' emits the java invocation without running it;
    # under root we drop to the neo4j user via gosu first.
    if running_as_root; then
        gosu neo4j:neo4j neo4j console --dry-run "${extraArgs[@]}"
    else
        neo4j console --dry-run "${extraArgs[@]}"
    fi
}
566 |
# Use su-exec to drop privileges to neo4j user
# Note that su-exec, despite its name, does not replicate the
# functionality of exec, so we need to use both
if [ "${cmd}" == "neo4j" ]; then
    # separate declaration and use of get_neo4j_run_cmd so that error codes are correctly surfaced
    neo4j_console_cmd="$(get_neo4j_run_cmd)"
    # ${var?:message} aborts the script if the dry-run produced no command.
    eval ${exec_cmd} ${neo4j_console_cmd?:No Neo4j command was generated}
else
    # Any other command is exec'd verbatim under the resolved user.
    ${exec_cmd} "$@"
fi
```
--------------------------------------------------------------------------------
/knowledge_graphs/ai_script_analyzer.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | AI Script Analyzer
3 |
4 | Parses Python scripts generated by AI coding assistants using AST to extract:
5 | - Import statements and their usage
6 | - Class instantiations and method calls
7 | - Function calls with parameters
8 | - Attribute access patterns
9 | - Variable type tracking
10 | """
11 |
12 | import ast
13 | import logging
14 | from pathlib import Path
15 | from typing import Dict, List, Set, Any, Optional, Tuple
16 | from dataclasses import dataclass, field
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
@dataclass
class ImportInfo:
    """Information about an import statement"""
    module: str                   # module referenced by the import statement
    name: str                     # name being imported
    alias: Optional[str] = None   # "import ... as alias" name, when present
    is_from_import: bool = False  # True for the "from module import name" form
    line_number: int = 0          # source line of the import (0 if unknown)
29 |
30 |
@dataclass
class MethodCall:
    """Information about a method call"""
    object_name: str                   # name of the object the method is called on
    method_name: str                   # method being invoked
    args: List[str]                    # positional arguments, as source-derived strings
    kwargs: Dict[str, str]             # keyword arguments, name -> source-derived string
    line_number: int                   # source line of the call
    object_type: Optional[str] = None  # Inferred class type
40 |
41 |
@dataclass
class AttributeAccess:
    """Information about attribute access"""
    object_name: str                   # name of the object whose attribute is read
    attribute_name: str                # attribute being accessed
    line_number: int                   # source line of the access
    object_type: Optional[str] = None  # Inferred class type
49 |
50 |
@dataclass
class FunctionCall:
    """Information about a function call"""
    function_name: str               # function name as written at the call site
    args: List[str]                  # positional arguments, as source-derived strings
    kwargs: Dict[str, str]           # keyword arguments, name -> source-derived string
    line_number: int                 # source line of the call
    full_name: Optional[str] = None  # Module.function_name
59 |
60 |
@dataclass
class ClassInstantiation:
    """Information about class instantiation"""
    variable_name: str                     # variable the new instance is assigned to
    class_name: str                        # class name as written at the call site
    args: List[str]                        # positional constructor arguments
    kwargs: Dict[str, str]                 # keyword constructor arguments
    line_number: int                       # source line of the instantiation
    full_class_name: Optional[str] = None  # Module.ClassName
70 |
71 |
@dataclass
class AnalysisResult:
    """Complete analysis results for a Python script"""
    file_path: str  # path of the analyzed script
    imports: List[ImportInfo] = field(default_factory=list)
    class_instantiations: List[ClassInstantiation] = field(default_factory=list)
    method_calls: List[MethodCall] = field(default_factory=list)
    attribute_accesses: List[AttributeAccess] = field(default_factory=list)
    function_calls: List[FunctionCall] = field(default_factory=list)
    variable_types: Dict[str, str] = field(default_factory=dict)  # variable_name -> class_type
    errors: List[str] = field(default_factory=list)  # analysis errors, as messages
83 |
84 |
class AIScriptAnalyzer:
    """Analyzes AI-generated Python scripts for validation against knowledge graph.

    Runs two passes over the AST: the first collects imports and builds an
    alias -> module map; the second records class instantiations, method
    calls, function calls and attribute accesses, tracking variable types so
    object usages can later be attributed to the classes that produced them.
    """

    def __init__(self):
        self.import_map: Dict[str, str] = {}  # alias -> actual_module_name
        self.variable_types: Dict[str, str] = {}  # variable_name -> class_type
        self.context_manager_vars: Dict[str, Tuple[int, int, str]] = {}  # var_name -> (start_line, end_line, type)
        # Dedup bookkeeping keyed by AST node id(). Initialized here (not only
        # in analyze_script) so helper methods never hit AttributeError if
        # called before analyze_script() has run.
        self.processed_calls: Set[int] = set()
        self.method_call_attributes: Set[int] = set()

    def analyze_script(self, script_path: str) -> AnalysisResult:
        """Analyze a Python script and extract all relevant information.

        Args:
            script_path: Path of the Python file to parse.

        Returns:
            AnalysisResult with extracted usage data. On any failure (I/O,
            syntax error, ...) a fresh result is returned that carries only
            the error message; partial data is intentionally discarded.
        """
        try:
            with open(script_path, 'r', encoding='utf-8') as f:
                content = f.read()

            tree = ast.parse(content)
            result = AnalysisResult(file_path=script_path)

            # Reset state for new analysis
            self.import_map.clear()
            self.variable_types.clear()
            self.context_manager_vars.clear()

            # Track processed nodes to avoid duplicates
            self.processed_calls = set()
            self.method_call_attributes = set()

            # First pass: collect imports and build import map
            for node in ast.walk(tree):
                if isinstance(node, (ast.Import, ast.ImportFrom)):
                    self._extract_imports(node, result)

            # Second pass: analyze usage patterns
            for node in ast.walk(tree):
                self._analyze_node(node, result)

            # Set inferred types on method calls and attribute accesses
            self._infer_object_types(result)

            result.variable_types = self.variable_types.copy()

            return result

        except Exception as e:
            error_msg = f"Failed to analyze script {script_path}: {str(e)}"
            logger.error(error_msg)
            result = AnalysisResult(file_path=script_path)
            result.errors.append(error_msg)
            return result

    def _extract_imports(self, node: ast.AST, result: AnalysisResult):
        """Extract import information and build import mapping.

        Populates result.imports and maps every visible alias to the fully
        qualified name it refers to (used later by _resolve_full_name).
        """
        line_num = getattr(node, 'lineno', 0)

        if isinstance(node, ast.Import):
            for alias in node.names:
                import_name = alias.name
                alias_name = alias.asname or import_name

                result.imports.append(ImportInfo(
                    module=import_name,
                    name=import_name,
                    alias=alias.asname,
                    is_from_import=False,
                    line_number=line_num
                ))

                self.import_map[alias_name] = import_name

        elif isinstance(node, ast.ImportFrom):
            # node.module is None for relative imports like `from . import x`.
            module = node.module or ""
            for alias in node.names:
                import_name = alias.name
                alias_name = alias.asname or import_name

                result.imports.append(ImportInfo(
                    module=module,
                    name=import_name,
                    alias=alias.asname,
                    is_from_import=True,
                    line_number=line_num
                ))

                # Map alias to full module.name
                if module:
                    full_name = f"{module}.{import_name}"
                    self.import_map[alias_name] = full_name
                else:
                    self.import_map[alias_name] = import_name

    def _analyze_node(self, node: ast.AST, result: AnalysisResult):
        """Analyze individual AST nodes for usage patterns.

        Dispatches on node type. Call nodes that were already consumed by an
        enclosing Assign/With handler are skipped via self.processed_calls so
        they are not reported twice (ast.walk visits parents before children).
        """
        # Assignments (class instantiations and method call results)
        if isinstance(node, ast.Assign):
            if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
                if isinstance(node.value, ast.Call):
                    # Check if it's a class instantiation or method call
                    if isinstance(node.value.func, ast.Name):
                        # Direct function/class call
                        self._extract_class_instantiation(node, result)
                        # Mark this call as processed to avoid duplicate processing
                        self.processed_calls.add(id(node.value))
                    elif isinstance(node.value.func, ast.Attribute):
                        # Method call - track the variable assignment for type inference
                        var_name = node.targets[0].id
                        self._track_method_result_assignment(node.value, var_name)
                        # Still process the method call
                        self._extract_method_call(node.value, result)
                        self.processed_calls.add(id(node.value))

        # AsyncWith statements (context managers)
        elif isinstance(node, ast.AsyncWith):
            self._handle_async_with(node, result)
        elif isinstance(node, ast.With):
            self._handle_with(node, result)

        # Method calls and function calls
        elif isinstance(node, ast.Call):
            # Skip if this call was already processed as part of an assignment
            if id(node) in self.processed_calls:
                return

            if isinstance(node.func, ast.Attribute):
                self._extract_method_call(node, result)
                # Mark this attribute as used in method call to avoid duplicate processing
                self.method_call_attributes.add(id(node.func))
            elif isinstance(node.func, ast.Name):
                # Check if this is likely a class instantiation (based on imported classes)
                func_name = node.func.id
                full_name = self._resolve_full_name(func_name)

                # If this is a known imported class, treat as class instantiation
                if self._is_likely_class_instantiation(func_name, full_name):
                    self._extract_nested_class_instantiation(node, result)
                else:
                    self._extract_function_call(node, result)

        # Attribute access (not in call context)
        elif isinstance(node, ast.Attribute):
            # Skip if this attribute was already processed as part of a method call
            if id(node) in self.method_call_attributes:
                return
            self._extract_attribute_access(node, result)

    def _extract_class_instantiation(self, node: ast.Assign, result: AnalysisResult):
        """Extract class instantiation from assignment (`x = ClassName(...)`)."""
        target = node.targets[0]
        call = node.value
        line_num = getattr(node, 'lineno', 0)

        if isinstance(target, ast.Name) and isinstance(call, ast.Call):
            var_name = target.id
            class_name = self._get_name_from_call(call.func)

            if class_name:
                args = [self._get_arg_representation(arg) for arg in call.args]
                kwargs = {
                    kw.arg: self._get_arg_representation(kw.value)
                    for kw in call.keywords if kw.arg
                }

                # Resolve full class name using import map
                full_class_name = self._resolve_full_name(class_name)

                instantiation = ClassInstantiation(
                    variable_name=var_name,
                    class_name=class_name,
                    args=args,
                    kwargs=kwargs,
                    line_number=line_num,
                    full_class_name=full_class_name
                )

                result.class_instantiations.append(instantiation)

                # Track variable type for later method call analysis
                self.variable_types[var_name] = full_class_name or class_name

    def _extract_method_call(self, node: ast.Call, result: AnalysisResult):
        """Extract method call information (`obj.method(...)`)."""
        if isinstance(node.func, ast.Attribute):
            line_num = getattr(node, 'lineno', 0)

            # Get object and method names
            obj_name = self._get_name_from_node(node.func.value)
            method_name = node.func.attr

            if obj_name and method_name:
                args = [self._get_arg_representation(arg) for arg in node.args]
                kwargs = {
                    kw.arg: self._get_arg_representation(kw.value)
                    for kw in node.keywords if kw.arg
                }

                method_call = MethodCall(
                    object_name=obj_name,
                    method_name=method_name,
                    args=args,
                    kwargs=kwargs,
                    line_number=line_num,
                    object_type=self.variable_types.get(obj_name)
                )

                result.method_calls.append(method_call)

    def _extract_function_call(self, node: ast.Call, result: AnalysisResult):
        """Extract function call information (`func(...)` with a bare name)."""
        if isinstance(node.func, ast.Name):
            line_num = getattr(node, 'lineno', 0)
            func_name = node.func.id

            args = [self._get_arg_representation(arg) for arg in node.args]
            kwargs = {
                kw.arg: self._get_arg_representation(kw.value)
                for kw in node.keywords if kw.arg
            }

            # Resolve full function name using import map
            full_func_name = self._resolve_full_name(func_name)

            function_call = FunctionCall(
                function_name=func_name,
                args=args,
                kwargs=kwargs,
                line_number=line_num,
                full_name=full_func_name
            )

            result.function_calls.append(function_call)

    def _extract_attribute_access(self, node: ast.Attribute, result: AnalysisResult):
        """Extract attribute access information (`obj.attr` outside a call)."""
        line_num = getattr(node, 'lineno', 0)

        obj_name = self._get_name_from_node(node.value)
        attr_name = node.attr

        if obj_name and attr_name:
            attribute_access = AttributeAccess(
                object_name=obj_name,
                attribute_name=attr_name,
                line_number=line_num,
                object_type=self.variable_types.get(obj_name)
            )

            result.attribute_accesses.append(attribute_access)

    def _infer_object_types(self, result: AnalysisResult):
        """Update object types for method calls and attribute accesses.

        Context-manager variables take precedence because their type is only
        valid within the `with` block's line range.
        """
        for method_call in result.method_calls:
            if not method_call.object_type:
                # First check context manager variables
                obj_type = self._get_context_aware_type(method_call.object_name, method_call.line_number)
                if obj_type:
                    method_call.object_type = obj_type
                else:
                    method_call.object_type = self.variable_types.get(method_call.object_name)

        for attr_access in result.attribute_accesses:
            if not attr_access.object_type:
                # First check context manager variables
                obj_type = self._get_context_aware_type(attr_access.object_name, attr_access.line_number)
                if obj_type:
                    attr_access.object_type = obj_type
                else:
                    attr_access.object_type = self.variable_types.get(attr_access.object_name)

    def _get_context_aware_type(self, var_name: str, line_number: int) -> Optional[str]:
        """Get the type of a variable considering its context (e.g., async with scope)."""
        if var_name in self.context_manager_vars:
            start_line, end_line, var_type = self.context_manager_vars[var_name]
            if start_line <= line_number <= end_line:
                return var_type
        return None

    def _get_name_from_call(self, node: ast.AST) -> Optional[str]:
        """Get the name from a call node (for class instantiation).

        Resolution rules are identical to _get_name_from_node, so delegate.
        """
        return self._get_name_from_node(node)

    def _get_name_from_node(self, node: ast.AST) -> Optional[str]:
        """Get dotted string representation of a Name/Attribute node, else None."""
        if isinstance(node, ast.Name):
            return node.id
        elif isinstance(node, ast.Attribute):
            value_name = self._get_name_from_node(node.value)
            if value_name:
                return f"{value_name}.{node.attr}"
        return None

    def _get_arg_representation(self, node: ast.AST) -> str:
        """Get a short, human-readable string representation of an argument."""
        if isinstance(node, ast.Constant):
            return repr(node.value)
        elif isinstance(node, ast.Name):
            return node.id
        elif isinstance(node, ast.Attribute):
            return self._get_name_from_node(node) or "<?>"
        elif isinstance(node, ast.Call):
            func_name = self._get_name_from_call(node.func)
            return f"{func_name}(...)" if func_name else "call(...)"
        else:
            # Fallback: just report the AST node type, e.g. "<ListComp>".
            return f"<{type(node).__name__}>"

    def _is_likely_class_instantiation(self, func_name: str, full_name: Optional[str]) -> bool:
        """Determine if a function call is likely a class instantiation.

        Heuristic only: relies on PEP 8 capitalization and common class-name
        suffixes, so unconventional names can be misclassified.
        """
        # Check if it's a known imported class (classes typically start with uppercase)
        if func_name and func_name[0].isupper():
            return True

        # Check if the full name suggests a class (contains known class patterns)
        if full_name:
            # Common class patterns in module names
            class_patterns = [
                'Model', 'Provider', 'Client', 'Agent', 'Manager', 'Handler',
                'Builder', 'Factory', 'Service', 'Controller', 'Processor'
            ]
            return any(pattern in full_name for pattern in class_patterns)

        return False

    def _extract_nested_class_instantiation(self, node: ast.Call, result: AnalysisResult):
        """Extract class instantiation that's not in direct assignment (e.g., as parameter)"""
        line_num = getattr(node, 'lineno', 0)

        if isinstance(node.func, ast.Name):
            class_name = node.func.id

            args = [self._get_arg_representation(arg) for arg in node.args]
            kwargs = {
                kw.arg: self._get_arg_representation(kw.value)
                for kw in node.keywords if kw.arg
            }

            # Resolve full class name using import map
            full_class_name = self._resolve_full_name(class_name)

            # Use a synthetic variable name since this isn't assigned to a variable
            var_name = f"<{class_name.lower()}_instance>"

            instantiation = ClassInstantiation(
                variable_name=var_name,
                class_name=class_name,
                args=args,
                kwargs=kwargs,
                line_number=line_num,
                full_class_name=full_class_name
            )

            result.class_instantiations.append(instantiation)

    def _track_method_result_assignment(self, call_node: ast.Call, var_name: str):
        """Track when a variable is assigned the result of a method call"""
        if isinstance(call_node.func, ast.Attribute):
            # For now, we'll use a generic type hint for method results
            # In a more sophisticated system, we could look up the return type
            self.variable_types[var_name] = "method_result"

    def _handle_async_with(self, node: ast.AsyncWith, result: AnalysisResult):
        """Handle async with statements and track context manager variables"""
        for item in node.items:
            if item.optional_vars and isinstance(item.optional_vars, ast.Name):
                var_name = item.optional_vars.id

                # If the context manager is a method call, track the result type
                if isinstance(item.context_expr, ast.Call) and isinstance(item.context_expr.func, ast.Attribute):
                    # Extract and process the method call
                    self._extract_method_call(item.context_expr, result)
                    self.processed_calls.add(id(item.context_expr))

                    # Track context manager scope for pydantic_ai run_stream calls
                    obj_name = self._get_name_from_node(item.context_expr.func.value)
                    method_name = item.context_expr.func.attr

                    if (obj_name and obj_name in self.variable_types and
                        'pydantic_ai' in str(self.variable_types[obj_name]) and
                        method_name == 'run_stream'):

                        # Calculate the scope of this async with block
                        start_line = getattr(node, 'lineno', 0)
                        end_line = getattr(node, 'end_lineno', start_line + 50)  # fallback estimate

                        # For run_stream, the return type is specifically StreamedRunResult
                        # This is the actual return type, not a generic placeholder
                        self.context_manager_vars[var_name] = (start_line, end_line, "pydantic_ai.StreamedRunResult")

    def _handle_with(self, node: ast.With, result: AnalysisResult):
        """Handle regular with statements and track context manager variables"""
        for item in node.items:
            if item.optional_vars and isinstance(item.optional_vars, ast.Name):
                var_name = item.optional_vars.id

                # If the context manager is a method call, track the result type
                if isinstance(item.context_expr, ast.Call) and isinstance(item.context_expr.func, ast.Attribute):
                    # Extract and process the method call
                    self._extract_method_call(item.context_expr, result)
                    self.processed_calls.add(id(item.context_expr))

                    # Track basic type information
                    self.variable_types[var_name] = "context_manager_result"

    def _resolve_full_name(self, name: str) -> Optional[str]:
        """Resolve a name to its full module.name using import map"""
        # Check if it's a direct import mapping
        if name in self.import_map:
            return self.import_map[name]

        # Check if it's a dotted name with first part in import map
        parts = name.split('.')
        if len(parts) > 1 and parts[0] in self.import_map:
            base_module = self.import_map[parts[0]]
            return f"{base_module}.{'.'.join(parts[1:])}"

        return None
505 |
506 |
def analyze_ai_script(script_path: str) -> AnalysisResult:
    """Analyze a single AI-generated script with a fresh analyzer instance.

    Args:
        script_path: Path of the Python file to analyze.

    Returns:
        The AnalysisResult produced by AIScriptAnalyzer.analyze_script.
    """
    return AIScriptAnalyzer().analyze_script(script_path)
511 |
512 |
if __name__ == "__main__":
    # Example usage: run the analyzer against a single script from the CLI.
    import sys

    if len(sys.argv) != 2:
        print("Usage: python ai_script_analyzer.py <script_path>")
        sys.exit(1)

    script_path = sys.argv[1]
    result = analyze_ai_script(script_path)

    # Print a short summary of everything the analyzer extracted.
    print(f"Analysis Results for: {result.file_path}")
    print(f"Imports: {len(result.imports)}")
    print(f"Class Instantiations: {len(result.class_instantiations)}")
    print(f"Method Calls: {len(result.method_calls)}")
    print(f"Function Calls: {len(result.function_calls)}")
    print(f"Attribute Accesses: {len(result.attribute_accesses)}")

    # Surface analysis failures (e.g. a syntax error in the target script).
    if result.errors:
        print(f"Errors: {result.errors}")
```
--------------------------------------------------------------------------------
/knowledge_graphs/hallucination_reporter.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Hallucination Reporter
3 |
4 | Generates comprehensive reports about AI coding assistant hallucinations
5 | detected in Python scripts. Supports multiple output formats.
6 | """
7 |
8 | import json
9 | import logging
10 | from datetime import datetime, timezone
11 | from pathlib import Path
12 | from typing import Dict, List, Any, Optional
13 |
14 | from knowledge_graph_validator import (
15 | ScriptValidationResult, ValidationStatus, ValidationResult
16 | )
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
21 | class HallucinationReporter:
22 | """Generates reports about detected hallucinations"""
23 |
    def __init__(self):
        # Single timezone-aware UTC timestamp captured at construction so
        # every report produced by this instance shares the same analysis time.
        self.report_timestamp = datetime.now(timezone.utc)
26 |
    def generate_comprehensive_report(self, validation_result: ScriptValidationResult) -> Dict[str, Any]:
        """Generate a comprehensive report in JSON format.

        Only items belonging to libraries present in the knowledge graph are
        included; usages of external libraries are filtered out so the report
        does not flag code the graph cannot vouch for. Methods and attributes
        are deduplicated by (line, name, object_type) because the analyzer may
        record the same access both as a method call and an attribute access.
        """

        # Categorize validations by status (knowledge graph items only)
        valid_items = []
        invalid_items = []
        uncertain_items = []
        not_found_items = []

        # Process imports (only knowledge graph ones)
        for val in validation_result.import_validations:
            if not val.validation.details.get('in_knowledge_graph', False):
                continue  # Skip external libraries
            item = {
                'type': 'IMPORT',
                'name': val.import_info.module,
                'line': val.import_info.line_number,
                'status': val.validation.status.value,
                'confidence': val.validation.confidence,
                'message': val.validation.message,
                'details': {
                    'is_from_import': val.import_info.is_from_import,
                    'alias': val.import_info.alias,
                    'available_classes': val.available_classes,
                    'available_functions': val.available_functions
                }
            }
            self._categorize_item(item, val.validation.status, valid_items, invalid_items, uncertain_items, not_found_items)

        # Process classes (only knowledge graph ones)
        for val in validation_result.class_validations:
            class_name = val.class_instantiation.full_class_name or val.class_instantiation.class_name
            if not self._is_from_knowledge_graph(class_name, validation_result):
                continue  # Skip external classes
            item = {
                'type': 'CLASS_INSTANTIATION',
                'name': val.class_instantiation.class_name,
                'full_name': val.class_instantiation.full_class_name,
                'variable': val.class_instantiation.variable_name,
                'line': val.class_instantiation.line_number,
                'status': val.validation.status.value,
                'confidence': val.validation.confidence,
                'message': val.validation.message,
                'details': {
                    'args_provided': val.class_instantiation.args,
                    'kwargs_provided': list(val.class_instantiation.kwargs.keys()),
                    'constructor_params': val.constructor_params,
                    'parameter_validation': self._serialize_validation_result(val.parameter_validation) if val.parameter_validation else None
                }
            }
            self._categorize_item(item, val.validation.status, valid_items, invalid_items, uncertain_items, not_found_items)

        # Track reported items to avoid duplicates
        reported_items = set()

        # Process methods (only knowledge graph ones)
        for val in validation_result.method_validations:
            if not (val.method_call.object_type and self._is_from_knowledge_graph(val.method_call.object_type, validation_result)):
                continue  # Skip external methods

            # Create unique key to avoid duplicates
            key = (val.method_call.line_number, val.method_call.method_name, val.method_call.object_type)
            if key not in reported_items:
                reported_items.add(key)
                item = {
                    'type': 'METHOD_CALL',
                    'name': val.method_call.method_name,
                    'object': val.method_call.object_name,
                    'object_type': val.method_call.object_type,
                    'line': val.method_call.line_number,
                    'status': val.validation.status.value,
                    'confidence': val.validation.confidence,
                    'message': val.validation.message,
                    'details': {
                        'args_provided': val.method_call.args,
                        'kwargs_provided': list(val.method_call.kwargs.keys()),
                        'expected_params': val.expected_params,
                        'parameter_validation': self._serialize_validation_result(val.parameter_validation) if val.parameter_validation else None,
                        'suggestions': val.validation.suggestions
                    }
                }
                self._categorize_item(item, val.validation.status, valid_items, invalid_items, uncertain_items, not_found_items)

        # Process attributes (only knowledge graph ones) - but skip if already reported as method
        for val in validation_result.attribute_validations:
            if not (val.attribute_access.object_type and self._is_from_knowledge_graph(val.attribute_access.object_type, validation_result)):
                continue  # Skip external attributes

            # Create unique key - if this was already reported as a method, skip it
            key = (val.attribute_access.line_number, val.attribute_access.attribute_name, val.attribute_access.object_type)
            if key not in reported_items:
                reported_items.add(key)
                item = {
                    'type': 'ATTRIBUTE_ACCESS',
                    'name': val.attribute_access.attribute_name,
                    'object': val.attribute_access.object_name,
                    'object_type': val.attribute_access.object_type,
                    'line': val.attribute_access.line_number,
                    'status': val.validation.status.value,
                    'confidence': val.validation.confidence,
                    'message': val.validation.message,
                    'details': {
                        'expected_type': val.expected_type
                    }
                }
                self._categorize_item(item, val.validation.status, valid_items, invalid_items, uncertain_items, not_found_items)

        # Process functions (only knowledge graph ones)
        for val in validation_result.function_validations:
            if not (val.function_call.full_name and self._is_from_knowledge_graph(val.function_call.full_name, validation_result)):
                continue  # Skip external functions
            item = {
                'type': 'FUNCTION_CALL',
                'name': val.function_call.function_name,
                'full_name': val.function_call.full_name,
                'line': val.function_call.line_number,
                'status': val.validation.status.value,
                'confidence': val.validation.confidence,
                'message': val.validation.message,
                'details': {
                    'args_provided': val.function_call.args,
                    'kwargs_provided': list(val.function_call.kwargs.keys()),
                    'expected_params': val.expected_params,
                    'parameter_validation': self._serialize_validation_result(val.parameter_validation) if val.parameter_validation else None
                }
            }
            self._categorize_item(item, val.validation.status, valid_items, invalid_items, uncertain_items, not_found_items)

        # Create library summary
        library_summary = self._create_library_summary(validation_result)

        # Generate report
        report = {
            'analysis_metadata': {
                'script_path': validation_result.script_path,
                'analysis_timestamp': self.report_timestamp.isoformat(),
                'total_imports': len(validation_result.import_validations),
                'total_classes': len(validation_result.class_validations),
                'total_methods': len(validation_result.method_validations),
                'total_attributes': len(validation_result.attribute_validations),
                'total_functions': len(validation_result.function_validations)
            },
            'validation_summary': {
                'overall_confidence': validation_result.overall_confidence,
                'total_validations': len(valid_items) + len(invalid_items) + len(uncertain_items) + len(not_found_items),
                'valid_count': len(valid_items),
                'invalid_count': len(invalid_items),
                'uncertain_count': len(uncertain_items),
                'not_found_count': len(not_found_items),
                # NOTE(review): uncertain items are excluded from the denominator
                # here — presumably intentional; confirm before relying on it.
                'hallucination_rate': len(invalid_items + not_found_items) / max(1, len(valid_items) + len(invalid_items) + len(not_found_items))
            },
            'libraries_analyzed': library_summary,
            'validation_details': {
                'valid_items': valid_items,
                'invalid_items': invalid_items,
                'uncertain_items': uncertain_items,
                'not_found_items': not_found_items
            },
            'hallucinations_detected': validation_result.hallucinations_detected,
            'recommendations': self._generate_recommendations(validation_result)
        }

        return report
190 |
191 | def _is_from_knowledge_graph(self, item_name: str, validation_result) -> bool:
192 | """Check if an item is from a knowledge graph module"""
193 | if not item_name:
194 | return False
195 |
196 | # Get knowledge graph modules from import validations
197 | kg_modules = set()
198 | for val in validation_result.import_validations:
199 | if val.validation.details.get('in_knowledge_graph', False):
200 | kg_modules.add(val.import_info.module)
201 | if '.' in val.import_info.module:
202 | kg_modules.add(val.import_info.module.split('.')[0])
203 |
204 | # Check if the item belongs to any knowledge graph module
205 | if '.' in item_name:
206 | base_module = item_name.split('.')[0]
207 | return base_module in kg_modules
208 |
209 | return any(item_name in module or module.endswith(item_name) for module in kg_modules)
210 |
    def _serialize_validation_result(self, validation_result) -> Optional[Dict[str, Any]]:
        """Convert ValidationResult to JSON-serializable dictionary.

        Returns None when given None so callers can pass optional results
        straight through.
        """
        if validation_result is None:
            return None

        return {
            'status': validation_result.status.value,  # enum -> plain string
            'confidence': validation_result.confidence,
            'message': validation_result.message,
            'details': validation_result.details,
            'suggestions': validation_result.suggestions
        }
223 |
224 | def _categorize_item(self, item: Dict[str, Any], status: ValidationStatus,
225 | valid_items: List, invalid_items: List, uncertain_items: List, not_found_items: List):
226 | """Categorize validation item by status"""
227 | if status == ValidationStatus.VALID:
228 | valid_items.append(item)
229 | elif status == ValidationStatus.INVALID:
230 | invalid_items.append(item)
231 | elif status == ValidationStatus.UNCERTAIN:
232 | uncertain_items.append(item)
233 | elif status == ValidationStatus.NOT_FOUND:
234 | not_found_items.append(item)
235 |
    def _create_library_summary(self, validation_result: ScriptValidationResult) -> List[Dict[str, Any]]:
        """Create summary of libraries analyzed.

        One entry per imported module, listing the classes, methods,
        attributes and functions used from it. Usages whose module prefix was
        never imported directly are dropped (no matching library_stats key).
        """
        library_stats = {}

        # Aggregate stats by library/module
        for val in validation_result.import_validations:
            module = val.import_info.module
            if module not in library_stats:
                library_stats[module] = {
                    'module_name': module,
                    'import_status': val.validation.status.value,
                    'import_confidence': val.validation.confidence,
                    'classes_used': [],
                    'methods_called': [],
                    'attributes_accessed': [],
                    'functions_called': []
                }

        # Add class usage
        for val in validation_result.class_validations:
            class_name = val.class_instantiation.class_name
            full_name = val.class_instantiation.full_class_name

            # Try to match to library
            if full_name:
                parts = full_name.split('.')
                if len(parts) > 1:
                    # Module is everything before the final class name segment.
                    module = '.'.join(parts[:-1])
                    if module in library_stats:
                        library_stats[module]['classes_used'].append({
                            'class_name': class_name,
                            'status': val.validation.status.value,
                            'confidence': val.validation.confidence
                        })

        # Add method usage
        for val in validation_result.method_validations:
            method_name = val.method_call.method_name
            object_type = val.method_call.object_type

            if object_type:
                parts = object_type.split('.')
                if len(parts) > 1:
                    module = '.'.join(parts[:-1])
                    if module in library_stats:
                        library_stats[module]['methods_called'].append({
                            'method_name': method_name,
                            'class_name': parts[-1],  # last segment of the object type
                            'status': val.validation.status.value,
                            'confidence': val.validation.confidence
                        })

        # Add attribute usage
        for val in validation_result.attribute_validations:
            attr_name = val.attribute_access.attribute_name
            object_type = val.attribute_access.object_type

            if object_type:
                parts = object_type.split('.')
                if len(parts) > 1:
                    module = '.'.join(parts[:-1])
                    if module in library_stats:
                        library_stats[module]['attributes_accessed'].append({
                            'attribute_name': attr_name,
                            'class_name': parts[-1],
                            'status': val.validation.status.value,
                            'confidence': val.validation.confidence
                        })

        # Add function usage
        for val in validation_result.function_validations:
            func_name = val.function_call.function_name
            full_name = val.function_call.full_name

            if full_name:
                parts = full_name.split('.')
                if len(parts) > 1:
                    module = '.'.join(parts[:-1])
                    if module in library_stats:
                        library_stats[module]['functions_called'].append({
                            'function_name': func_name,
                            'status': val.validation.status.value,
                            'confidence': val.validation.confidence
                        })

        return list(library_stats.values())
322 |
323 | def _generate_recommendations(self, validation_result: ScriptValidationResult) -> List[str]:
324 | """Generate recommendations based on validation results"""
325 | recommendations = []
326 |
327 | # Only count actual hallucinations (from knowledge graph libraries)
328 | kg_hallucinations = [h for h in validation_result.hallucinations_detected]
329 |
330 | if kg_hallucinations:
331 | method_issues = [h for h in kg_hallucinations if h['type'] == 'METHOD_NOT_FOUND']
332 | attr_issues = [h for h in kg_hallucinations if h['type'] == 'ATTRIBUTE_NOT_FOUND']
333 | param_issues = [h for h in kg_hallucinations if h['type'] == 'INVALID_PARAMETERS']
334 |
335 | if method_issues:
336 | recommendations.append(
337 | f"Found {len(method_issues)} non-existent methods in knowledge graph libraries. "
338 | "Consider checking the official documentation for correct method names."
339 | )
340 |
341 | if attr_issues:
342 | recommendations.append(
343 | f"Found {len(attr_issues)} non-existent attributes in knowledge graph libraries. "
344 | "Verify attribute names against the class documentation."
345 | )
346 |
347 | if param_issues:
348 | recommendations.append(
349 | f"Found {len(param_issues)} parameter mismatches in knowledge graph libraries. "
350 | "Check function signatures for correct parameter names and types."
351 | )
352 | else:
353 | recommendations.append(
354 | "No hallucinations detected in knowledge graph libraries. "
355 | "External library usage appears to be working as expected."
356 | )
357 |
358 | if validation_result.overall_confidence < 0.7:
359 | recommendations.append(
360 | "Overall confidence is moderate. Most validations were for external libraries not in the knowledge graph."
361 | )
362 |
363 | return recommendations
364 |
365 | def save_json_report(self, report: Dict[str, Any], output_path: str):
366 | """Save report as JSON file"""
367 | with open(output_path, 'w', encoding='utf-8') as f:
368 | json.dump(report, f, indent=2, ensure_ascii=False)
369 |
370 | logger.info(f"JSON report saved to: {output_path}")
371 |
372 | def save_markdown_report(self, report: Dict[str, Any], output_path: str):
373 | """Save report as Markdown file"""
374 | md_content = self._generate_markdown_content(report)
375 |
376 | with open(output_path, 'w', encoding='utf-8') as f:
377 | f.write(md_content)
378 |
379 | logger.info(f"Markdown report saved to: {output_path}")
380 |
381 | def _generate_markdown_content(self, report: Dict[str, Any]) -> str:
382 | """Generate Markdown content from report"""
383 | md = []
384 |
385 | # Header
386 | md.append("# AI Hallucination Detection Report")
387 | md.append("")
388 | md.append(f"**Script:** `{report['analysis_metadata']['script_path']}`")
389 | md.append(f"**Analysis Date:** {report['analysis_metadata']['analysis_timestamp']}")
390 | md.append(f"**Overall Confidence:** {report['validation_summary']['overall_confidence']:.2%}")
391 | md.append("")
392 |
393 | # Summary
394 | summary = report['validation_summary']
395 | md.append("## Summary")
396 | md.append("")
397 | md.append(f"- **Total Validations:** {summary['total_validations']}")
398 | md.append(f"- **Valid:** {summary['valid_count']} ({summary['valid_count']/summary['total_validations']:.1%})")
399 | md.append(f"- **Invalid:** {summary['invalid_count']} ({summary['invalid_count']/summary['total_validations']:.1%})")
400 | md.append(f"- **Not Found:** {summary['not_found_count']} ({summary['not_found_count']/summary['total_validations']:.1%})")
401 | md.append(f"- **Uncertain:** {summary['uncertain_count']} ({summary['uncertain_count']/summary['total_validations']:.1%})")
402 | md.append(f"- **Hallucination Rate:** {summary['hallucination_rate']:.1%}")
403 | md.append("")
404 |
405 | # Hallucinations
406 | if report['hallucinations_detected']:
407 | md.append("## 🚨 Hallucinations Detected")
408 | md.append("")
409 | for i, hallucination in enumerate(report['hallucinations_detected'], 1):
410 | md.append(f"### {i}. {hallucination['type'].replace('_', ' ').title()}")
411 | md.append(f"**Location:** {hallucination['location']}")
412 | md.append(f"**Description:** {hallucination['description']}")
413 | if hallucination.get('suggestion'):
414 | md.append(f"**Suggestion:** {hallucination['suggestion']}")
415 | md.append("")
416 |
417 | # Libraries
418 | if report['libraries_analyzed']:
419 | md.append("## 📚 Libraries Analyzed")
420 | md.append("")
421 | for lib in report['libraries_analyzed']:
422 | md.append(f"### {lib['module_name']}")
423 | md.append(f"**Import Status:** {lib['import_status']}")
424 | md.append(f"**Import Confidence:** {lib['import_confidence']:.2%}")
425 |
426 | if lib['classes_used']:
427 | md.append("**Classes Used:**")
428 | for cls in lib['classes_used']:
429 | status_emoji = "✅" if cls['status'] == 'VALID' else "❌"
430 | md.append(f" - {status_emoji} `{cls['class_name']}` ({cls['confidence']:.1%})")
431 |
432 | if lib['methods_called']:
433 | md.append("**Methods Called:**")
434 | for method in lib['methods_called']:
435 | status_emoji = "✅" if method['status'] == 'VALID' else "❌"
436 | md.append(f" - {status_emoji} `{method['class_name']}.{method['method_name']}()` ({method['confidence']:.1%})")
437 |
438 | if lib['attributes_accessed']:
439 | md.append("**Attributes Accessed:**")
440 | for attr in lib['attributes_accessed']:
441 | status_emoji = "✅" if attr['status'] == 'VALID' else "❌"
442 | md.append(f" - {status_emoji} `{attr['class_name']}.{attr['attribute_name']}` ({attr['confidence']:.1%})")
443 |
444 | if lib['functions_called']:
445 | md.append("**Functions Called:**")
446 | for func in lib['functions_called']:
447 | status_emoji = "✅" if func['status'] == 'VALID' else "❌"
448 | md.append(f" - {status_emoji} `{func['function_name']}()` ({func['confidence']:.1%})")
449 |
450 | md.append("")
451 |
452 | # Recommendations
453 | if report['recommendations']:
454 | md.append("## 💡 Recommendations")
455 | md.append("")
456 | for rec in report['recommendations']:
457 | md.append(f"- {rec}")
458 | md.append("")
459 |
460 | # Detailed Results
461 | md.append("## 📋 Detailed Validation Results")
462 | md.append("")
463 |
464 | # Invalid items
465 | invalid_items = report['validation_details']['invalid_items']
466 | if invalid_items:
467 | md.append("### ❌ Invalid Items")
468 | md.append("")
469 | for item in invalid_items:
470 | md.append(f"- **{item['type']}** `{item['name']}` (Line {item['line']}) - {item['message']}")
471 | md.append("")
472 |
473 | # Not found items
474 | not_found_items = report['validation_details']['not_found_items']
475 | if not_found_items:
476 | md.append("### 🔍 Not Found Items")
477 | md.append("")
478 | for item in not_found_items:
479 | md.append(f"- **{item['type']}** `{item['name']}` (Line {item['line']}) - {item['message']}")
480 | md.append("")
481 |
482 | # Valid items (sample)
483 | valid_items = report['validation_details']['valid_items']
484 | if valid_items:
485 | md.append("### ✅ Valid Items (Sample)")
486 | md.append("")
487 | for item in valid_items[:10]: # Show first 10
488 | md.append(f"- **{item['type']}** `{item['name']}` (Line {item['line']}) - {item['message']}")
489 | if len(valid_items) > 10:
490 | md.append(f"- ... and {len(valid_items) - 10} more valid items")
491 | md.append("")
492 |
493 | return "\n".join(md)
494 |
495 | def print_summary(self, report: Dict[str, Any]):
496 | """Print a concise summary to console"""
497 | print("\n" + "="*80)
498 | print("🤖 AI HALLUCINATION DETECTION REPORT")
499 | print("="*80)
500 |
501 | print(f"Script: {report['analysis_metadata']['script_path']}")
502 | print(f"Overall Confidence: {report['validation_summary']['overall_confidence']:.1%}")
503 |
504 | summary = report['validation_summary']
505 | print(f"\nValidation Results:")
506 | print(f" ✅ Valid: {summary['valid_count']}")
507 | print(f" ❌ Invalid: {summary['invalid_count']}")
508 | print(f" 🔍 Not Found: {summary['not_found_count']}")
509 | print(f" ❓ Uncertain: {summary['uncertain_count']}")
510 | print(f" 📊 Hallucination Rate: {summary['hallucination_rate']:.1%}")
511 |
512 | if report['hallucinations_detected']:
513 | print(f"\n🚨 {len(report['hallucinations_detected'])} Hallucinations Detected:")
514 | for hall in report['hallucinations_detected'][:5]: # Show first 5
515 | print(f" - {hall['type'].replace('_', ' ').title()} at {hall['location']}")
516 | print(f" {hall['description']}")
517 |
518 | if report['recommendations']:
519 | print(f"\n💡 Recommendations:")
520 | for rec in report['recommendations'][:3]: # Show first 3
521 | print(f" - {rec}")
522 |
523 | print("="*80)
```
--------------------------------------------------------------------------------
/neo4j/docker-neo4j/src/test/java/com/neo4j/docker/coredb/configurations/TestConfSettings.java:
--------------------------------------------------------------------------------
```java
1 | package com.neo4j.docker.coredb.configurations;
2 |
3 | import com.neo4j.docker.coredb.plugins.Neo4jPluginEnv;
4 | import com.neo4j.docker.utils.DatabaseIO;
5 | import com.neo4j.docker.utils.Neo4jVersion;
6 | import com.neo4j.docker.utils.SetContainerUser;
7 | import com.neo4j.docker.utils.TemporaryFolderManager;
8 | import com.neo4j.docker.utils.TestSettings;
9 | import com.neo4j.docker.utils.WaitStrategies;
10 | import org.junit.jupiter.api.Assertions;
11 | import org.junit.jupiter.api.Assumptions;
12 | import org.junit.jupiter.api.BeforeAll;
13 | import org.junit.jupiter.api.Tag;
14 | import org.junit.jupiter.api.Test;
15 | import org.junit.jupiter.api.extension.RegisterExtension;
16 | import org.neo4j.driver.exceptions.ClientException;
17 | import org.slf4j.Logger;
18 | import org.slf4j.LoggerFactory;
19 | import org.testcontainers.containers.GenericContainer;
20 | import org.testcontainers.containers.output.OutputFrame;
21 | import org.testcontainers.containers.output.Slf4jLogConsumer;
22 | import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy;
23 |
24 | import java.io.File;
25 | import java.io.FileNotFoundException;
26 | import java.io.IOException;
27 | import java.nio.file.Files;
28 | import java.nio.file.Path;
29 | import java.time.Duration;
30 | import java.util.HashMap;
31 | import java.util.Map;
32 | import java.util.Scanner;
33 | import java.util.stream.Stream;
34 |
35 | public class TestConfSettings
36 | {
37 | private static final String PASSWORD = "none";
38 | private static final String AUTH = "none"; // or "neo4j/"+PASSWORD if we want authentication
39 | private final Logger log = LoggerFactory.getLogger(TestConfSettings.class);
40 | private static Path confFolder;
41 | private static Map<Setting,Configuration> confNames;
42 | @RegisterExtension
43 | public static TemporaryFolderManager temporaryFolderManager = new TemporaryFolderManager();
44 |
45 | @BeforeAll
46 | static void getVersionSpecificConfigurationSettings()
47 | {
48 | confFolder = Configuration.getConfigurationResourcesFolder();
49 | confNames = Configuration.getConfigurationNameMap();
50 | }
51 |
52 | private GenericContainer createContainer()
53 | {
54 | return new GenericContainer(TestSettings.IMAGE_ID)
55 | .withEnv("NEO4J_AUTH", AUTH)
56 | .withEnv("NEO4J_ACCEPT_LICENSE_AGREEMENT", "yes")
57 | .withExposedPorts(7474, 7687)
58 | .withLogConsumer(new Slf4jLogConsumer(log));
59 | }
60 |
61 | private GenericContainer makeContainerDumpConfig(GenericContainer container)
62 | {
63 | SetContainerUser.nonRootUser( container );
64 | container.setCommand("dump-config");
65 | WaitStrategies.waitUntilContainerFinished(container, Duration.ofSeconds(30));
66 | return container;
67 | }
68 |
69 | private Map<String, String> parseConfFile(File conf) throws FileNotFoundException
70 | {
71 | Map<String, String> configurations = new HashMap<>();
72 | Scanner scanner = new Scanner(conf);
73 | while ( scanner.hasNextLine() )
74 | {
75 | String[] params = scanner.nextLine().split( "=", 2 );
76 | if(params.length < 2)
77 | {
78 | continue;
79 | }
80 | log.debug( params[0] + "\t:\t" + params[1] );
81 | configurations.put( params[0], params[1] );
82 | }
83 | return configurations;
84 | }
85 |
86 | private void assertConfigurationPresentInDebugLog( Path debugLog, Configuration setting, String value, boolean shouldBeFound ) throws IOException
87 | {
88 | // searches the debug log for the given string, returns true if present
89 | Stream<String> lines = Files.lines(debugLog);
90 | String actualSetting = lines.filter(s -> s.contains( setting.name ))
91 | .findFirst()
92 | .orElse( "" );
93 | lines.close();
94 | if(shouldBeFound)
95 | {
96 | Assertions.assertTrue( !actualSetting.isEmpty(), setting.name+" was never set" );
97 | Assertions.assertTrue( actualSetting.contains( value ),
98 | setting.name +" is set to the wrong value. Expected: "+
99 | value +" Actual: " + actualSetting );
100 | }
101 | else
102 | {
103 | Assertions.assertTrue( actualSetting.isEmpty(),setting.name+" was set when it should not have been. " +
104 | "Actual value: "+actualSetting );
105 | }
106 | }
107 |
108 | @Test
109 | void testIgnoreNumericVars()
110 | {
111 | try(GenericContainer container = createContainer())
112 | {
113 | container.withEnv( "NEO4J_1a", "1" )
114 | .waitingFor( WaitStrategies.waitForBoltReady() );
115 | container.start();
116 | Assertions.assertTrue( container.isRunning() );
117 | String errorLogs = container.getLogs( OutputFrame.OutputType.STDERR);
118 | Assertions.assertTrue( errorLogs.contains( "WARNING: 1a not written to conf file. Settings that start with a number are not permitted" ),
119 | "Neo4j did not warn about invalid numeric config variable `Neo4j_1a`.\n" +
120 | "Actual warnings were:\n"+errorLogs);
121 | }
122 | }
123 |
124 | @Test
125 | void testEnvVarsOverrideDefaultConfigurations() throws Exception
126 | {
127 | Assumptions.assumeTrue(TestSettings.NEO4J_VERSION.isAtLeastVersion(new Neo4jVersion(3, 0, 0)),
128 | "No neo4j-admin in 2.3: skipping neo4j-admin-conf-override test");
129 | File conf;
130 | Map<Setting,String> expectedValues = new HashMap<Setting,String>() {{
131 | put( Setting.MEMORY_PAGECACHE_SIZE, "1000m");
132 | put( Setting.MEMORY_HEAP_INITIALSIZE, "2000m");
133 | put( Setting.MEMORY_HEAP_MAXSIZE, "3000m");
134 | put( Setting.DIRECTORIES_LOGS, "/notdefaultlogs" );
135 | put( Setting.DIRECTORIES_DATA, "/notdefaultdata" );
136 | }};
137 |
138 | try(GenericContainer container = createContainer())
139 | {
140 | for(Setting s : expectedValues.keySet())
141 | {
142 | container.withEnv( confNames.get( s ).envName, expectedValues.get( s ) );
143 | }
144 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
145 | conf = confMount.resolve( "neo4j.conf" ).toFile();
146 | makeContainerDumpConfig( container );
147 | container.start();
148 | }
149 |
150 | // now check the settings we set via env are in the new conf file
151 | Assertions.assertTrue( conf.exists(), "configuration file not written" );
152 | Assertions.assertTrue( conf.canRead(), "configuration file not readable for some reason?" );
153 |
154 | Map<String,String> configurations = parseConfFile( conf );
155 | for(Setting s : expectedValues.keySet())
156 | {
157 | Assertions.assertTrue( configurations.containsKey( confNames.get( s ).name ),
158 | confNames.get( s ).name + " not set at all" );
159 | Assertions.assertEquals( expectedValues.get( s ),
160 | configurations.get( confNames.get( s ).name ),
161 | confNames.get( s ).name + " not overridden" );
162 | }
163 | }
164 |
165 | @Test
166 | void testReadsTheConfFile() throws Exception
167 | {
168 | Path debugLog;
169 |
170 | try(GenericContainer container = createContainer().waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD)))
171 | {
172 | //Mount /conf
173 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
174 | Path logMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/logs");
175 | debugLog = logMount.resolve("debug.log");
176 | SetContainerUser.nonRootUser( container );
177 | //Create ReadConf.conf file with the custom env variables
178 | Path confFile = confFolder.resolve( "ReadConf.conf" );
179 | Files.copy( confFile, confMount.resolve( "neo4j.conf" ) );
180 | //Start the container
181 | container.start();
182 | }
183 |
184 | //Check if the container reads the conf file
185 | assertConfigurationPresentInDebugLog( debugLog, confNames.get( Setting.MEMORY_HEAP_MAXSIZE ),
186 | "512", true );
187 | }
188 |
189 | @Test
190 | void testDefaultsConfigsAreSet() throws Exception
191 | {
192 | try(GenericContainer container = createContainer().waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD)))
193 | {
194 | //Mount /logs
195 | Path logMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/logs");
196 | SetContainerUser.nonRootUser( container );
197 | //Start the container
198 | container.start();
199 | DatabaseIO dbio = new DatabaseIO( container );
200 | Path debugLog = logMount.resolve( "debug.log" );
201 |
202 | String expectedDefaultListenAddress = "0.0.0.0";
203 | dbio.verifyConfigurationSetting("neo4j", PASSWORD, confNames.get( Setting.DEFAULT_LISTEN_ADDRESS), expectedDefaultListenAddress);
204 | assertConfigurationPresentInDebugLog(debugLog, confNames.get( Setting.DEFAULT_LISTEN_ADDRESS), expectedDefaultListenAddress, true);
205 | // test enterprise only default configurations are set
206 | if (TestSettings.EDITION == TestSettings.Edition.ENTERPRISE) {
207 | String expectedTxAddress = container.getContainerId().substring(0, 12) + ":6000";
208 | String expectedRaftAddress = container.getContainerId().substring(0, 12) + ":7000";
209 | String expectedRoutingAddress = container.getContainerId().substring(0, 12) + ":7688";
210 | dbio.verifyConfigurationSetting("neo4j", PASSWORD, confNames.get( Setting.CLUSTER_TRANSACTION_ADDRESS), expectedTxAddress);
211 | assertConfigurationPresentInDebugLog(debugLog, confNames.get( Setting.CLUSTER_TRANSACTION_ADDRESS), expectedTxAddress,true);
212 | dbio.verifyConfigurationSetting("neo4j", PASSWORD, confNames.get( Setting.CLUSTER_RAFT_ADDRESS), expectedRaftAddress);
213 | assertConfigurationPresentInDebugLog(debugLog, confNames.get( Setting.CLUSTER_RAFT_ADDRESS), expectedRaftAddress,true);
214 | dbio.verifyConfigurationSetting("neo4j", PASSWORD, confNames.get( Setting.CLUSTER_ROUTING_ADDRESS), expectedRoutingAddress);
215 | assertConfigurationPresentInDebugLog(debugLog, confNames.get( Setting.CLUSTER_ROUTING_ADDRESS), expectedRoutingAddress,true);
216 | }
217 | }
218 | }
219 |
220 | @Test
221 | void testCommentedConfigsAreReplacedByDefaultOnes() throws Exception
222 | {
223 | File conf;
224 | try(GenericContainer container = createContainer())
225 | {
226 | //Mount /conf
227 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
228 | conf = confMount.resolve( "neo4j.conf" ).toFile();
229 | SetContainerUser.nonRootUser( container );
230 | //Create ConfsReplaced.conf file in mounted folder
231 | Files.copy( confFolder.resolve( "ConfsReplaced.conf" ), conf.toPath() );
232 | makeContainerDumpConfig( container );
233 | //Start the container
234 | container.start();
235 | }
236 | //Read the config file to check if the config is set correctly
237 | Map<String,String> configurations = parseConfFile( conf );
238 | Assertions.assertTrue( configurations.containsKey( confNames.get( Setting.MEMORY_PAGECACHE_SIZE ).name ),
239 | "conf settings not set correctly by docker-entrypoint" );
240 | Assertions.assertEquals( "512M",
241 | configurations.get(confNames.get( Setting.MEMORY_PAGECACHE_SIZE ).name),
242 | "conf settings not appended correctly by docker-entrypoint" );
243 | }
244 |
245 | @Test
246 | void testConfFileNotOverridenByDockerEntrypoint() throws Exception
247 | {
248 | File conf;
249 | try(GenericContainer container = createContainer())
250 | {
251 | //Mount /conf
252 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
253 | conf = confMount.resolve( "neo4j.conf" ).toFile();
254 | SetContainerUser.nonRootUser( container );
255 | //Create ConfsNotOverridden.conf file
256 | Path confFile = confFolder.resolve( "ConfsNotOverridden.conf" );
257 | Files.copy( confFile, confMount.resolve( "neo4j.conf" ) );
258 | makeContainerDumpConfig( container );
259 | container.start();
260 | }
261 |
262 | //Read the config file to check if the config is not overriden
263 | Map<String, String> configurations = parseConfFile(conf);
264 | Assertions.assertTrue(configurations.containsKey(confNames.get( Setting.MEMORY_PAGECACHE_SIZE).name),
265 | "conf settings not set correctly by docker-entrypoint");
266 | Assertions.assertEquals("1024M",
267 | configurations.get(confNames.get( Setting.MEMORY_PAGECACHE_SIZE).name),
268 | "docker-entrypoint has overridden custom setting set from user's conf");
269 | }
270 |
271 | @Test
272 | void testOldConfigNamesNotOverwrittenByDockerDefaults() throws Exception
273 | {
274 | Assumptions.assumeTrue( TestSettings.NEO4J_VERSION.isAtLeastVersion( Neo4jVersion.NEO4J_VERSION_500),
275 | "test only applicable after 5.0." );
276 | // at some point we will fully deprecate old config names, at which point we add an assume-version-less-than here
277 | Path logMount;
278 | Map<Setting,Configuration> oldConfMap = Configuration.getConfigurationNameMap( new Neo4jVersion( 4, 4, 0 ) );
279 | Map<Setting,String> expectedValues = new HashMap<Setting,String>() {{
280 | put( Setting.TXLOG_RETENTION_POLICY, "5M size" );
281 | put( Setting.MEMORY_PAGECACHE_SIZE, "100.00KiB" );
282 | put( Setting.DEFAULT_LISTEN_ADDRESS, "127.0.0.1" );
283 | }};
284 | if( TestSettings.EDITION == TestSettings.Edition.ENTERPRISE)
285 | {
286 | expectedValues.put( Setting.CLUSTER_TRANSACTION_ADDRESS, "1.2.3.4:8000" );
287 | expectedValues.put( Setting.CLUSTER_RAFT_ADDRESS, "1.2.3.4:9000" );
288 | }
289 |
290 | try(GenericContainer container = createContainer())
291 | {
292 | logMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/logs");
293 | SetContainerUser.nonRootUser( container );
294 | // set configurations using old config names
295 | for( Setting s : expectedValues.keySet() )
296 | {
297 | container.withEnv( oldConfMap.get( s ).envName, expectedValues.get( s ) );
298 | }
299 | // the container probably won't start nicely because the clustering settings are ivalid.
300 | // However we only care that the configs were read properly, so we can kill as soon as neo4j logs that it started.
301 | container.waitingFor( new LogMessageWaitStrategy()
302 | .withRegEx( ".*Remote interface available at http://localhost:7474/.*" )
303 | .withStartupTimeout( Duration.ofSeconds( 60 ) ));
304 | container.start();
305 | }
306 | for( Setting s : expectedValues.keySet() )
307 | {
308 | // configuration should be present in debug log under new configuration name
309 | assertConfigurationPresentInDebugLog(logMount.resolve( "debug.log" ),
310 | confNames.get( s ),
311 | expectedValues.get( s ),
312 | true );
313 | }
314 | }
315 |
316 | @Test
317 | void testEnvVarsOverrideConfFile() throws Exception
318 | {
319 | Assumptions.assumeTrue(TestSettings.NEO4J_VERSION.isAtLeastVersion(new Neo4jVersion(4, 2, 0)),
320 | "test not applicable in versions before 4.2.");
321 | Path debugLog;
322 | try(GenericContainer container = createContainer()
323 | .withEnv(confNames.get(Setting.MEMORY_PAGECACHE_SIZE).envName, "512.00MiB")
324 | .waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD)))
325 | {
326 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
327 | Path logMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/logs");
328 | debugLog = logMount.resolve( "debug.log" );
329 | SetContainerUser.nonRootUser( container );
330 | //Create EnvVarsOverride.conf file
331 | Path confFile = confFolder.resolve("EnvVarsOverride.conf");
332 | Files.copy( confFile, confMount.resolve( "neo4j.conf" ) );
333 | //Start the container
334 | container.start();
335 | }
336 | assertConfigurationPresentInDebugLog(debugLog, confNames.get(Setting.MEMORY_PAGECACHE_SIZE), "512.00MiB", true );
337 | }
338 |
339 | @Test
340 | void testEnterpriseOnlyDefaultsDontOverrideConfFile() throws Exception
341 | {
342 | Assumptions.assumeTrue(TestSettings.EDITION == TestSettings.Edition.ENTERPRISE,
343 | "This is testing only ENTERPRISE EDITION configs");
344 |
345 | try(GenericContainer container = createContainer().waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD)))
346 | {
347 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
348 | Path logMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/logs");
349 | // mount a configuration file with enterprise only settings already set
350 | Path confFile = confFolder.resolve( "EnterpriseOnlyNotOverwritten.conf" );
351 | Files.copy( confFile, confMount.resolve( "neo4j.conf" ) );
352 |
353 | //Start the container
354 | SetContainerUser.nonRootUser( container );
355 | container.start();
356 | //Read debug.log to check that cluster confs are set successfully
357 | assertConfigurationPresentInDebugLog( logMount.resolve( "debug.log" ),
358 | confNames.get( Setting.CLUSTER_TRANSACTION_ADDRESS ),
359 | "localhost:6060", true );
360 | }
361 | }
362 |
363 | @Test
364 | void testMountingMetricsFolderShouldNotSetConfInCommunity() throws Exception
365 | {
366 | Assumptions.assumeTrue( TestSettings.EDITION == TestSettings.Edition.COMMUNITY,
367 | "Test only valid with community edition");
368 |
369 | try ( GenericContainer container = createContainer() )
370 | {
371 | temporaryFolderManager.createFolderAndMountAsVolume(container, "/metrics");
372 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
373 | makeContainerDumpConfig( container );
374 | container.start();
375 |
376 | File conf = confMount.resolve( "neo4j.conf" ).toFile();
377 | Map<String, String> configurations = parseConfFile(conf);
378 | Assertions.assertFalse(configurations.containsKey(confNames.get( Setting.DIRECTORIES_METRICS ).name),
379 | "should not be setting any metrics configurations in community edition");
380 | }
381 | }
382 |
383 | @Test
384 | void testCommunityDoesNotHaveEnterpriseConfigs() throws Exception
385 | {
386 | Assumptions.assumeTrue(TestSettings.EDITION == TestSettings.Edition.COMMUNITY,
387 | "This is testing only COMMUNITY EDITION configs");
388 |
389 | Path debugLog;
390 | try(GenericContainer container = createContainer()
391 | .withEnv(confNames.get(Setting.MEMORY_PAGECACHE_SIZE).envName, "512m")
392 | .waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD)))
393 | {
394 | //Mount /logs
395 | Path logMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/logs");
396 | debugLog = logMount.resolve( "debug.log" );
397 | SetContainerUser.nonRootUser( container );
398 | //Start the container
399 | container.start();
400 | }
401 |
402 | //Read debug.log to check that cluster confs are not present
403 | assertConfigurationPresentInDebugLog( debugLog, confNames.get(Setting.CLUSTER_TRANSACTION_ADDRESS), "*", false );
404 | }
405 |
406 | @Test
407 | @Tag("BundleTest")
408 | void testSettingAppendsToConfFileWithoutEmptyLine_neo4jPlugins() throws Exception
409 | {
410 | String expectedPageCacheSize = "1000.00MiB";
411 | String pluginStr = "[\"apoc\"]";
412 | if(TestSettings.NEO4J_VERSION.isOlderThan( Neo4jVersion.NEO4J_VERSION_500 ))
413 | {
414 | pluginStr = "[\"apoc-core\"]";
415 | }
416 |
417 | try(GenericContainer container = createContainer().waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD)))
418 | {
419 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
420 | Files.copy( confFolder.resolve( "NoNewline.conf" ), confMount.resolve( "neo4j.conf" ) );
421 | container.withEnv( Neo4jPluginEnv.get(), pluginStr );
422 | //Start the container
423 | container.start();
424 | DatabaseIO dbio = new DatabaseIO( container );
425 | try
426 | {
427 | dbio.runCypherQuery( "neo4j", PASSWORD, "RETURN apoc.version()" );
428 | }
429 | catch( ClientException ex )
430 | {
431 | Assertions.fail("Did not load apoc plugin.", ex);
432 | }
433 | dbio.verifyConfigurationSetting( "neo4j",
434 | PASSWORD,
435 | confNames.get( Setting.MEMORY_PAGECACHE_SIZE ),
436 | expectedPageCacheSize);
437 | }
438 | }
439 |
440 | @Test
441 | void testSettingAppendsToConfFileWithoutEmptyLine_envSetting() throws Exception
442 | {
443 | String expectedHeapSize = "128.00MiB";
444 | String expectedPageCacheSize = "1000.00MiB";
445 |
446 | try(GenericContainer container = createContainer())
447 | {
448 | Path confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
449 | Files.copy( confFolder.resolve( "NoNewline.conf" ), confMount.resolve( "neo4j.conf" ) );
450 | // set an env variable
451 | container.withEnv( confNames.get( Setting.MEMORY_HEAP_MAXSIZE ).envName, expectedHeapSize )
452 | .waitingFor(WaitStrategies.waitForNeo4jReady(PASSWORD));
453 | //Start the container
454 | container.start();
455 | DatabaseIO dbio = new DatabaseIO( container );
456 | dbio.verifyConfigurationSetting( "neo4j",
457 | PASSWORD,
458 | confNames.get( Setting.MEMORY_HEAP_MAXSIZE ),
459 | expectedHeapSize);
460 | dbio.verifyConfigurationSetting( "neo4j",
461 | PASSWORD,
462 | confNames.get( Setting.MEMORY_PAGECACHE_SIZE ),
463 | expectedPageCacheSize);
464 | }
465 | }
466 |
467 | @Test
468 | void testApocEnvVarsAreWrittenToApocConf() throws Exception
469 | {
470 | Assumptions.assumeTrue( TestSettings.NEO4J_VERSION.isAtLeastVersion( new Neo4jVersion( 5,3, 0 ) ),
471 | "APOC conf not present before 5.0 and this bug wasn't fixed before 5.3.");
472 |
473 | Path confMount;
474 | try(GenericContainer container = createContainer())
475 | {
476 | container.withEnv( confNames.get( Setting.APOC_EXPORT_FILE_ENABLED ).envName, "true" );
477 | container.withEnv( Neo4jPluginEnv.get(), "[\"apoc\"]" );
478 | confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
479 | makeContainerDumpConfig( container );
480 | container.start();
481 | }
482 | // there's no way to verify that APOC configurations have been set by querying neo4j or the debug log,
483 | // so the only verification we can do is check that neo4j started ok and that there is an apoc.conf dumped.
484 | File apocConf = confMount.resolve( "apoc.conf" ).toFile();
485 | Assertions.assertTrue( apocConf.exists(), "Did not create an apoc.conf to contain the apoc settings." );
486 | Map<String,String> actualApocSettings = parseConfFile( apocConf );
487 | Assertions.assertTrue(actualApocSettings.containsKey(confNames.get(Setting.APOC_EXPORT_FILE_ENABLED).name),
488 | "APOC setting not added to apoc.conf");
489 | Assertions.assertEquals("true",
490 | actualApocSettings.get(confNames.get( Setting.APOC_EXPORT_FILE_ENABLED).name),
491 | "Incorrect value written for APOC setting");
492 | }
493 |
494 | @Test
495 | void testShellExpansionAvoided() throws Exception
496 | {
497 | Assumptions.assumeTrue( TestSettings.NEO4J_VERSION.isAtLeastVersion( Neo4jVersion.NEO4J_VERSION_400),
498 | "test only applicable to 4.0 and beyond." );
499 |
500 | Path confMount;
501 | try(GenericContainer container = createContainer()
502 | .withEnv(confNames.get(Setting.SECURITY_PROCEDURES_UNRESTRICTED).envName, "*"))
503 | {
504 | confMount = temporaryFolderManager.createFolderAndMountAsVolume(container, "/conf");
505 | makeContainerDumpConfig( container );
506 | container.start();
507 | }
508 | File conf = confMount.resolve( "neo4j.conf" ).toFile();
509 | Map<String, String> configurations = parseConfFile(conf);
510 | Assertions.assertTrue(configurations.containsKey(confNames.get( Setting.SECURITY_PROCEDURES_UNRESTRICTED).name),
511 | "configuration not set from env var");
512 | Assertions.assertEquals("*",
513 | configurations.get(confNames.get( Setting.SECURITY_PROCEDURES_UNRESTRICTED).name),
514 | "Configuration value should be *. If it's not docker-entrypoint.sh probably evaluated it as a glob expression.");
515 | }
516 | }
517 |
```
--------------------------------------------------------------------------------
/neo4j/docker-neo4j/docker-image-src/calver/coredb/docker-entrypoint.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash -eu
# NOTE(review): shebang options are lost if this script is invoked as
# `bash docker-entrypoint.sh`; confirm the image always execs it directly.

# First command-line argument passed to the container entrypoint.
cmd="$1"

# load useful utility functions
. /startup/utilities.sh
function is_readable
{
    # Returns 0 if ${1} is readable by the current user/groups, 1 otherwise.
    # Relies on the globals userid, groups[] and the containsElement helper
    # from utilities.sh.
    # This code is fairly ugly but works no matter who this script is running as.
    # It would be nice if the writability tests could use this logic somehow.
    local _file=${1}
    local perm
    perm=$(stat -c %a "${_file}")
    # normalise to exactly three digits so special-mode bits (e.g. 4755) and
    # short forms (e.g. 40) do not shift the owner/group/other positions
    perm=$(printf '%03d' "$((10#${perm}))")
    perm=${perm: -3}

    # everyone permission
    if [[ ${perm:2:1} -ge 4 ]]; then
        return 0
    fi
    # owner permissions
    if [[ ${perm:0:1} -ge 4 ]]; then
        if [[ "$(stat -c %U "${_file}")" = "${userid}" ]] || [[ "$(stat -c %u "${_file}")" = "${userid}" ]]; then
            return 0
        fi
    fi
    # group permissions
    if [[ ${perm:1:1} -ge 4 ]]; then
        if containsElement "$(stat -c %g "${_file}")" "${groups[@]}" || containsElement "$(stat -c %G "${_file}")" "${groups[@]}" ; then
            return 0
        fi
    fi
    return 1
}
33 |
34 | function is_writable  # returns 0 if ${1} is writable by ${userid} or any of ${groups[@]}
35 | {
36 |     # It would be nice if this and the is_readable function could combine somehow
37 |     local _file=${1}
38 |     perm=$(stat -c %a "${_file}")
39 | 
40 |     # everyone permission
41 |     if containsElement ${perm:2:1} 2 3 6 7; then
42 |         return 0
43 |     fi
44 |     # owner permissions
45 |     if containsElement ${perm:0:1} 2 3 6 7; then
46 |         if [[ "$(stat -c %U "${_file}")" = "${userid}" ]] || [[ "$(stat -c %u "${_file}")" = "${userid}" ]]; then
47 |             return 0
48 |         fi
49 |     fi
50 |     # group permissions
51 |     if containsElement ${perm:1:1} 2 3 6 7; then
52 |         if containsElement "$(stat -c %g "${_file}")" "${groups[@]}" || containsElement "$(stat -c %G "${_file}")" "${groups[@]}" ; then
53 |             return 0
54 |         fi
55 |     fi
56 |     return 1
57 | }
58 |
59 | function check_mounted_folder_readable  # fail fast (with permissions advice) if a mounted path is not readable
60 | {
61 |     local _directory=${1}
62 |     debug_msg "checking ${_directory} is readable"
63 |     if ! is_readable "${_directory}"; then
64 |         print_permissions_advice_and_fail "${_directory}" "${userid}" "${groupid}"  # presumably exits the script; defined in /startup/utilities.sh — TODO confirm
65 |     fi
66 | }
67 |
68 | function check_mounted_folder_writable_with_chown  # ensure a mounted folder is writable; chown it to neo4j when running as root, else fail with advice
69 | {
70 |     # The /data and /log directory are a bit different because they are very likely to be mounted by the user but not
71 |     # necessarily writable.
72 |     # This depends on whether a user ID is passed to the container and which folders are mounted.
73 |     #
74 |     # No user ID passed to container:
75 |     # 1) No folders are mounted.
76 |     # The /data and /log folder are owned by neo4j by default, so should be writable already.
77 |     # 2) Both /log and /data are mounted.
78 |     # This means on start up, /data and /logs are owned by an unknown user and we should chown them to neo4j for
79 |     # backwards compatibility.
80 |     #
81 |     # User ID passed to container:
82 |     # 1) Both /data and /logs are mounted
83 |     # The /data and /logs folders are owned by an unknown user but we *should* have rw permission to them.
84 |     # That should be verified and error (helpfully) if not.
85 |     # 2) User mounts /data or /logs *but not both*
86 |     # The unmounted folder is still owned by neo4j, which should already be writable. The mounted folder should
87 |     # have rw permissions through user id. This should be verified.
88 |     # 3) No folders are mounted.
89 |     # The /data and /log folder are owned by neo4j by default, and these are already writable by the user.
90 |     # (This is a very unlikely use case).
91 | 
92 |     local mountFolder=${1}
93 |     debug_msg "checking ${mountFolder} is writable"
94 |     if running_as_root && ! secure_mode_enabled; then
95 |         # check folder permissions
96 |         if ! is_writable "${mountFolder}" ; then
97 |             # warn that we're about to chown the folder and then chown it
98 |             echo "Warning: Folder mounted to \"${mountFolder}\" is not writable from inside container. Changing folder owner to ${userid}."
99 |             chown -R "${userid}":"${groupid}" "${mountFolder}"
100 |         # check permissions on files in the folder
101 |         elif [ "$(su-exec "${userid}":"${groupid}" find "${mountFolder}" -not -writable | wc -l)" -gt 0 ]; then
102 |             echo "Warning: Some files inside \"${mountFolder}\" are not writable from inside container. Changing folder owner to ${userid}."
103 |             chown -R "${userid}":"${groupid}" "${mountFolder}"
104 |         fi
105 |     else
106 |         if [[ ! -w "${mountFolder}" ]] && [[ "$(stat -c %U "${mountFolder}")" != "neo4j" ]]; then
107 |             print_permissions_advice_and_fail "${mountFolder}" "${userid}" "${groupid}"
108 |         fi
109 |     fi
110 | }
111 |
112 | function load_plugin_from_location  # copy a plugin jar already shipped inside the image into the active plugins dir
113 | {
114 |     # Install a plugin from location at runtime.
115 |     local _plugin_name="${1}"
116 |     local _location="${2}"
117 | 
118 |     local _plugins_dir="${NEO4J_HOME}/plugins"
119 |     if [ -d /plugins ]; then
120 |         local _plugins_dir="/plugins"
121 |     fi
122 | 
123 |     local _destination="${_plugins_dir}/${_plugin_name}.jar"
124 | 
125 |     # Now we install the plugin that is shipped with Neo4j
126 |     for filename in ${_location}; do  # ${_location} is deliberately unquoted: it is a glob expanded to the bundled jar(s)
127 |         echo "Installing Plugin '${_plugin_name}' from ${_location} to ${_destination}"
128 |         cp --preserve "${filename}" "${_destination}"
129 |         chmod +rw "${_destination}"
130 |     done
131 | 
132 |     if ! is_readable "${_destination}"; then
133 |         echo >&2 "Plugin at '${_destination}' is not readable"
134 |         exit 1
135 |     fi
136 | }
137 |
138 | function load_plugin_from_url  # download the plugin jar matching this neo4j version, using the plugin's published versions.json
139 | {
140 |     # Load a plugin at runtime. The provided github repository must have a versions.json on the master branch with the
141 |     # correct format.
142 |     local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graph-ql
143 | 
144 |     local _plugins_dir="${NEO4J_HOME}/plugins"
145 |     if [ -d /plugins ]; then
146 |         local _plugins_dir="/plugins"
147 |     fi
148 |     local _versions_json_url="$(jq --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.versions" /startup/neo4j-plugins.json )"  # URL of this plugin's versions.json, read from the bundled manifest
149 |     debug_msg "Will read ${_plugin_name} versions.json from ${_versions_json_url}"
150 |     # Using the same name for the plugin irrespective of version ensures we don't end up with different versions of the same plugin
151 |     local _destination="${_plugins_dir}/${_plugin_name}.jar"
152 |     local _neo4j_version="$(neo4j --version | cut -d' ' -f2)"
153 | 
154 |     # Now we call out to github to get the versions.json for this plugin and we parse that to find the url for the correct plugin jar for our neo4j version
155 |     echo "Fetching versions.json for Plugin '${_plugin_name}' from ${_versions_json_url}"
156 |     local _versions_json
157 |     if ! _versions_json="$(wget -q --timeout 300 --tries 30 -O - "${_versions_json_url}")"; then
158 |         debug_msg "ERROR: could not fetch '${_versions_json}'"
159 |         echo >&2 "ERROR: could not query ${_versions_json_url} for plugin compatibility information.
160 | This could indicate a problem with your network or this container's network settings.
161 | Neo4j will continue to start, but \"${_plugin_name}\" will not be loaded."
162 |         return 1
163 |     fi
164 |     local _plugin_jar_url="$(echo "${_versions_json}" | jq -L/startup --raw-output "import \"semver\" as lib; [ .[] | select(.neo4j|lib::semver(\"${_neo4j_version}\")) ] | min_by(.neo4j) | .jar")"  # of the compatible entries, take the one with the lowest .neo4j version
165 |     if [[ -z "${_plugin_jar_url}" ]] || [[ "${_plugin_jar_url}" == "null" ]]; then
166 |         debug_msg "ERROR: '${_versions_json_url}' does not contain an entry for ${_neo4j_version}"
167 |         echo >&2 "ERROR: No compatible \"${_plugin_name}\" plugin found for Neo4j ${_neo4j_version} ${NEO4J_EDITION}.
168 | This can happen with the newest Neo4j versions when a compatible plugin has not yet been released.
169 | You can either use an older version of Neo4j, or continue without ${_plugin_name}.
170 | Neo4j will continue to start, but \"${_plugin_name}\" will not be loaded."
171 |     else
172 |         echo "Installing Plugin '${_plugin_name}' from ${_plugin_jar_url} to ${_destination} "
173 |         wget -q --timeout 300 --tries 30 --output-document="${_destination}" "${_plugin_jar_url}"
174 | 
175 |         if ! is_readable "${_destination}"; then
176 |             echo >&2 "Plugin at '${_destination}' is not readable"
177 |             exit 1
178 |         fi
179 |     fi
180 | }
181 |
182 | function apply_plugin_default_configuration  # write a plugin's default settings into neo4j.conf without clobbering user-set values
183 | {
184 |     # Apply the plugin's default configuration properties (read from /startup/neo4j-plugins.json)
185 |     # to neo4j.conf, skipping any property the user has already set in their reference config.
186 |     local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graphql
187 |     local _reference_conf="${2}" # used to determine if we can override properties
188 |     local _neo4j_conf="${NEO4J_HOME}/conf/neo4j.conf"
189 | 
190 |     local _property _value
191 |     echo "Applying default values for plugin ${_plugin_name} to neo4j.conf"
192 |     for _entry in $(jq --compact-output --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.properties | to_entries[]" /startup/neo4j-plugins.json); do
193 |         _property="$(jq --raw-output '.key' <<< "${_entry}")"
194 |         _value="$(jq --raw-output '.value' <<< "${_entry}")"
195 |         debug_msg "${_plugin_name} requires setting ${_property}=${_value}"
196 | 
197 |         # the first grep strips out comments
198 |         if grep -o "^[^#]*" "${_reference_conf}" | grep -q --fixed-strings "${_property}=" ; then
199 |             # property is already set in the user provided config. In this case we don't override what has been set explicitly by the user.
200 |             echo "Skipping ${_property} for plugin ${_plugin_name} because it is already set."
201 |             echo "You may need to add ${_value} to the ${_property} setting in your configuration file."
202 |         else
203 |             if grep -o "^[^#]*" "${_neo4j_conf}" | grep -q --fixed-strings "${_property}=" ; then
204 |                 sed --in-place "s/${_property}=/&${_value},/" "${_neo4j_conf}"  # NOTE(review): ${_property}/${_value} are unescaped in the sed expression — verify plugin defaults never contain '/', '&' or regex metacharacters
205 |                 debug_msg "${_property} was already in the configuration file, so ${_value} was added to it."
206 |             else
207 |                 echo -e "\n${_property}=${_value}" >> "${_neo4j_conf}"
208 |                 debug_msg "${_property}=${_value} has been added to the configuration file."
209 |             fi
210 |         fi
211 |     done
212 | }
213 |
214 | function install_neo4j_plugins  # validate NEO4J_PLUGINS, then install each plugin from the image or by download and apply its defaults
215 | {
216 |     # first verify that the requested plugins are valid.
217 |     debug_msg "One or more NEO4J_PLUGINS have been requested."
218 |     local _known_plugins=($(jq --raw-output "keys[]" /startup/neo4j-plugins.json))  # all plugin names known to the bundled manifest
219 |     debug_msg "Checking requested plugins are known and can be installed."
220 |     for plugin_name in $(echo "${NEO4J_PLUGINS}" | jq --raw-output '.[]'); do
221 |         if ! containsElement "${plugin_name}" "${_known_plugins[@]}"; then
222 |             printf >&2 "\"%s\" is not a known Neo4j plugin. Options are:\n%s" "${plugin_name}" "$(jq --raw-output "keys[1:][]" /startup/neo4j-plugins.json)"  # NOTE(review): keys[1:] omits the first manifest key from the listed options — confirm intended
223 |             exit 1
224 |         fi
225 |     done
226 | 
227 |     # We store a copy of the config before we modify it for the plugins to allow us to see if there are user-set values in the input config that we shouldn't override
228 |     local _old_config="$(mktemp)"
229 |     if [ -e "${NEO4J_HOME}"/conf/neo4j.conf ]; then
230 |         cp "${NEO4J_HOME}"/conf/neo4j.conf "${_old_config}"
231 |     else
232 |         touch "${NEO4J_HOME}"/conf/neo4j.conf
233 |         touch "${_old_config}"
234 |     fi
235 |     for plugin_name in $(echo "${NEO4J_PLUGINS}" | jq --raw-output '.[]'); do
236 |         debug_msg "Plugin ${plugin_name} has been requested"
237 |         local _location="$(jq --raw-output "with_entries( select(.key==\"${plugin_name}\") ) | to_entries[] | .value.location" /startup/neo4j-plugins.json )"
238 |         if [ "${_location}" != "null" -a -n "$(shopt -s nullglob; echo ${_location})" ]; then  # true when the manifest location glob matches a file shipped inside the image
239 |             debug_msg "$plugin_name is already in the container at ${_location}"
240 |             load_plugin_from_location "${plugin_name}" "${_location}"
241 |             debug_msg "Applying plugin specific configurations."
242 |             apply_plugin_default_configuration "${plugin_name}" "${_old_config}"
243 |         else
244 |             debug_msg "$plugin_name must be downloaded."
245 |             if load_plugin_from_url "${plugin_name}"; then
246 |                 debug_msg "Applying plugin specific configurations."
247 |                 apply_plugin_default_configuration "${plugin_name}" "${_old_config}"
248 |             fi
249 |         fi
250 |     done
251 |     rm "${_old_config}"
252 | }
253 |
254 | function add_docker_default_to_conf  # append a docker default setting, but only if the setting is not already present in neo4j.conf
255 | {
256 |     # docker defaults should NOT overwrite values already in the conf file
257 |     local _setting="${1}"
258 |     local _value="${2}"
259 | 
260 |     if [ ! -e "${NEO4J_HOME}"/conf/neo4j.conf ] || ! grep -q "^${_setting}=" "${NEO4J_HOME}"/conf/neo4j.conf
261 |     then
262 |         debug_msg "Appended ${_setting}=${_value} to ${NEO4J_HOME}/conf/neo4j.conf"
263 |         echo -e "\n${_setting}=${_value}" >> "${NEO4J_HOME}"/conf/neo4j.conf  # fully quoted so values are never word-split or glob-expanded (e.g. "*")
264 |     fi
265 | }
266 |
267 | function add_env_setting_to_conf  # write a setting derived from an env var, replacing any existing value in the target conf file
268 | {
269 |     # settings from environment variables should overwrite values already in the conf
270 |     local _setting=${1}
271 |     local _value=${2}
272 |     local _conf_file
273 |     local _append_not_replace_configs=("server.jvm.additional")  # multi-valued settings that accumulate instead of being replaced
274 | 
275 |     # different settings need to go in different files now.
276 |     case "$(echo ${_setting} | cut -d . -f 1)" in
277 |         apoc)
278 |             _conf_file="${NEO4J_HOME}"/conf/apoc.conf
279 |             ;;
280 |         *)
281 |             _conf_file="${NEO4J_HOME}"/conf/neo4j.conf
282 |             ;;
283 |     esac
284 | 
285 |     if [ -e "${_conf_file}" ] && grep -q -F "${_setting}=" "${_conf_file}"; then
286 |         if containsElement "${_setting}" "${_append_not_replace_configs[@]}"; then
287 |             debug_msg "${_setting} will be appended to ${_conf_file} without replacing existing settings."
288 |         else
289 |             # Remove any lines containing the setting already
290 |             debug_msg "Removing existing setting for ${_setting} in ${_conf_file}"
291 |             sed --in-place "/^${_setting}=.*/d" "${_conf_file}"  # NOTE(review): ${_setting} is unescaped in the sed address, so its '.' chars act as wildcards
292 |         fi
293 |     fi
294 |     # Then always append setting to file
295 |     debug_msg "Appended ${_setting}=${_value} to ${_conf_file}"
296 |     echo "${_setting}=${_value}" >> "${_conf_file}"
297 | }
298 |
299 | function set_initial_password  # parse the NEO4J_AUTH-style value and set the initial admin password (or disable auth)
300 | {
301 |     local _neo4j_auth="${1}"  # e.g. "neo4j/secret", "neo4j/secret/true", "none", or empty
302 | 
303 |     # set the neo4j initial password only if you run the database server
304 |     if [ "${cmd}" == "neo4j" ]; then
305 |         if [ "${_neo4j_auth:-}" == "none" ]; then
306 |             debug_msg "Authentication is requested to be unset"
307 |             add_env_setting_to_conf "dbms.security.auth_enabled" "false"
308 |         elif [[ "${_neo4j_auth:-}" =~ ^([^/]+)\/([^/]+)/?([tT][rR][uU][eE])?$ ]]; then  # user/password with optional trailing /true to request a password change on first login
309 |             admin_user="${BASH_REMATCH[1]}"
310 |             password="${BASH_REMATCH[2]}"
311 |             do_reset="${BASH_REMATCH[3]}"
312 | 
313 |             if [ "${password}" == "neo4j" ]; then
314 |                 echo >&2 "Invalid value for password. It cannot be 'neo4j', which is the default."
315 |                 exit 1
316 |             fi
317 |             if [ "${admin_user}" != "neo4j" ]; then
318 |                 echo >&2 "Invalid admin username, it must be neo4j."
319 |                 exit 1
320 |             fi
321 | 
322 |             # this line has an inbuilt assumption that any configuration settings from the environment have already been applied to neo4j.conf
323 |             local _min_password_length=$(cat "${NEO4J_HOME}"/conf/neo4j.conf | grep dbms.security.auth_minimum_password_length | sed -E 's/.*=(.*)/\1/')  # empty when the setting is absent; defaults to 8 below
324 |             if [ "${#password}" -lt "${_min_password_length:-"8"}" ]; then
325 |                 echo >&2 "Invalid value for password. The minimum password length is 8 characters.
326 | If Neo4j fails to start, you can:
327 | 1) Use a stronger password.
328 | 2) Set configuration dbms.security.auth_minimum_password_length to override the minimum password length requirement.
329 | 3) Set environment variable NEO4J_dbms_security_auth__minimum__password__length to override the minimum password length requirement."
330 |             fi
331 | 
332 |             if running_as_root; then
333 |                 # running set-initial-password as root will create subfolders to /data as root, causing startup fail when neo4j can't read or write the /data/dbms folder
334 |                 # creating the folder first will avoid that
335 |                 mkdir -p /data/dbms
336 |                 debug_msg "Making sure /data/dbms is owned by ${userid}:${groupid}"
337 |                 chown "${userid}":"${groupid}" /data/dbms
338 |             fi
339 | 
340 |             local extra_args=()
341 |             if [ "${do_reset}" == "true" ]; then
342 |                 extra_args+=("--require-password-change")
343 |             fi
344 |             if [ "${EXTENDED_CONF+"yes"}" == "yes" ]; then
345 |                 extra_args+=("--expand-commands")
346 |             fi
347 |             if debugging_enabled; then
348 |                 extra_args+=("--verbose")
349 |             fi
350 |             debug_msg "Setting initial password"
351 |             debug_msg "${neo4j_admin_cmd} dbms set-initial-password ***** ${extra_args[*]}"
352 |             ${neo4j_admin_cmd} dbms set-initial-password "${password}" "${extra_args[@]}"  # ${neo4j_admin_cmd} is deliberately unquoted: it may expand to "su-exec neo4j:neo4j neo4j-admin"
353 | 
354 |         elif [ -n "${_neo4j_auth:-}" ]; then
355 |             echo "$_neo4j_auth is invalid"  # NOTE(review): this prints the raw NEO4J_AUTH value (credentials) to stdout — consider redacting
356 |             echo >&2 "Invalid value for NEO4J_AUTH: '${_neo4j_auth}'"
357 |             exit 1
358 |         fi
359 |     fi
360 | }
361 |
362 | # ==== CODE STARTS ====
363 | debug_msg "DEBUGGING ENABLED"  # presumably a no-op unless debugging was enabled via env — TODO confirm against utilities.sh
364 | 
365 | # If we're running as root, then run as the neo4j user. Otherwise
366 | # docker is running with --user and we simply use that user. Note
367 | # that su-exec, despite its name, does not replicate the functionality
368 | # of exec, so we need to use both
369 | if running_as_root; then
370 |     userid="neo4j"
371 |     groupid="neo4j"
372 |     groups=($(id -G neo4j))
373 |     exec_cmd="exec su-exec neo4j:neo4j"
374 |     neo4j_admin_cmd="su-exec neo4j:neo4j neo4j-admin"
375 |     debug_msg "Running as root user inside neo4j image"
376 | else
377 |     userid="$(id -u)"
378 |     groupid="$(id -g)"
379 |     groups=($(id -G))
380 |     exec_cmd="exec"
381 |     neo4j_admin_cmd="neo4j-admin"
382 |     debug_msg "Running as user ${userid}:${groupid} inside neo4j image"
383 | fi
384 | readonly userid
385 | readonly groupid
386 | readonly groups
387 | readonly exec_cmd
388 | readonly neo4j_admin_cmd
389 | 
390 | # Need to chown the home directory
391 | if running_as_root; then
392 |     debug_msg "chowning ${NEO4J_HOME} recursively to ${userid}":"${groupid}"
393 |     chown -R "${userid}":"${groupid}" "${NEO4J_HOME}"
394 |     chmod 700 "${NEO4J_HOME}"
395 |     find "${NEO4J_HOME}" -mindepth 1 -maxdepth 1 -type d -exec chmod -R 700 {} \;  # lock down each top-level subdirectory (recursively)
396 |     debug_msg "Setting all files in ${NEO4J_HOME}/conf to permissions 600"
397 |     find "${NEO4J_HOME}"/conf -type f -exec chmod -R 600 {} \;
398 | fi
399 |
400 | ## == EXTRACT SECRETS FROM FILES ===
401 | # These environment variables are set by using docker secrets and they override their equivalent env vars
402 | # They are suffixed with _FILE and prefixed by the name of the env var they should override
403 | # e.g. NEO4J_AUTH_FILE will override the value of the NEO4J_AUTH
404 | # It's best to do this first so that the secrets are available for the rest of the script
405 | for variable_name in $(printenv | awk -F= '{print $1}'); do  # NOTE(review): multi-line env values would yield bogus "names" here
406 |     # Check if the variable ends with "_FILE" and starts with "NEO4J_"
407 |     if [[ $variable_name == *"_FILE" &&
408 |           $variable_name == "NEO4J_"* ]]; then
409 |         # Create a new variable name by removing the "_FILE" suffix
410 |         base_variable_name=${variable_name%_FILE}
411 | 
412 |         # Get the value of the _FILE variable
413 |         secret_file_path="${!variable_name}"
414 | 
415 |         if is_readable "${secret_file_path}"; then
416 |             # Read the secret value from the file
417 |             secret_value=$(<"$secret_file_path")
418 |         else
419 |             # File not readable
420 |             echo >&2 "The secret file '$secret_file_path' does not exist or is not readable. Make sure you have correctly configured docker secrets."
421 |             exit 1
422 |         fi
423 |         # Assign the value to the new variable
424 |         export "$base_variable_name"="$secret_value"
425 |     fi
426 | done
427 |
428 | # ==== CHECK LICENSE AGREEMENT ====
429 | 
430 | # Only prompt for license agreement if command contains "neo4j" in it
431 | if [[ "${cmd}" == *"neo4j"* ]]; then
432 |     if [ "${NEO4J_EDITION}" == "enterprise" ]; then
433 |         : ${NEO4J_ACCEPT_LICENSE_AGREEMENT:="not accepted"}  # default when the variable is unset
434 |         if [[ "$NEO4J_ACCEPT_LICENSE_AGREEMENT" != "yes" && "$NEO4J_ACCEPT_LICENSE_AGREEMENT" != "eval" ]]; then
435 |             echo >&2 "
436 | In order to use Neo4j Enterprise Edition you must accept the license agreement.
437 | 
438 | The license agreement is available at https://neo4j.com/terms/licensing/
439 | If you have a support contract the following terms apply https://neo4j.com/terms/support-terms/
440 | 
441 | If you do not have a commercial license and want to evaluate the Software
442 | please read the terms of the evaluation agreement before you accept.
443 | https://neo4j.com/terms/enterprise_us/
444 | 
445 | (c) Neo4j Sweden AB. All Rights Reserved.
446 | Use of this Software without a proper commercial license, or evaluation license
447 | with Neo4j, Inc. or its affiliates is prohibited.
448 | Neo4j has the right to terminate your usage if you are not compliant.
449 | 
450 | More information is also available at: https://neo4j.com/licensing/
451 | If you have further inquiries about licensing, please contact us via https://neo4j.com/contact-us/
452 | 
453 | To accept the commercial license agreement set the environment variable
454 | NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
455 | 
456 | To accept the terms of the evaluation agreement set the environment variable
457 | NEO4J_ACCEPT_LICENSE_AGREEMENT=eval
458 | 
459 | To do this you can use the following docker argument:
460 | 
461 | --env=NEO4J_ACCEPT_LICENSE_AGREEMENT=<yes|eval>
462 | "
463 |             exit 1
464 |         fi
465 |     fi
466 | fi
467 | 
468 | # NEO4JLABS_PLUGINS has been renamed to NEO4J_PLUGINS, but we want the old name to work for now.
469 | if [ -n "${NEO4JLABS_PLUGINS:-}" ];
470 | then
471 |     echo >&2 "NEO4JLABS_PLUGINS has been renamed to NEO4J_PLUGINS since Neo4j 5.0.0.
472 | The old name will still work, but is likely to be deprecated in future releases."
473 |     : ${NEO4J_PLUGINS:=${NEO4JLABS_PLUGINS:-}}  # adopt the legacy value only if NEO4J_PLUGINS itself is unset
474 | fi
475 |
476 | # ==== CHECK FILE PERMISSIONS ON MOUNTED FOLDERS ====
477 | 
478 | 
479 | if [ -d /conf ]; then
480 |     check_mounted_folder_readable "/conf"
481 |     rm -rf "${NEO4J_HOME}"/conf/*
482 |     debug_msg "Copying contents of /conf to ${NEO4J_HOME}/conf/*"
483 |     find /conf -type f -exec cp --preserve=ownership,mode {} "${NEO4J_HOME}"/conf \;  # copies files only, flattening any sub-directory structure of /conf
484 | fi
485 | 
486 | if [ -d /ssl ]; then
487 |     check_mounted_folder_readable "/ssl"
488 |     rm -rf "${NEO4J_HOME}"/certificates
489 |     ln -s /ssl "${NEO4J_HOME}"/certificates
490 | fi
491 | 
492 | if [ -d /plugins ]; then
493 |     if [[ -n "${NEO4J_PLUGINS:-}" ]]; then
494 |         # We need write permissions to write the required plugins to /plugins
495 |         debug_msg "Extra plugins were requested. Ensuring the mounted /plugins folder has the required write permissions."
496 |         check_mounted_folder_writable_with_chown "/plugins"
497 |     fi
498 |     check_mounted_folder_readable "/plugins"
499 |     : ${NEO4J_server_directories_plugins:="/plugins"}  # only set when the user has not already provided a value
500 | fi
501 | 
502 | if [ -d /import ]; then
503 |     check_mounted_folder_readable "/import"
504 |     : ${NEO4J_server_directories_import:="/import"}
505 | fi
506 | 
507 | if [ -d /metrics ]; then
508 |     # metrics is enterprise only
509 |     if [ "${NEO4J_EDITION}" == "enterprise" ];
510 |     then
511 |         check_mounted_folder_writable_with_chown "/metrics"
512 |         : ${NEO4J_server_directories_metrics:="/metrics"}
513 |     fi
514 | fi
515 | 
516 | if [ -d /logs ]; then
517 |     check_mounted_folder_writable_with_chown "/logs"
518 |     : ${NEO4J_server_directories_logs:="/logs"}
519 | fi
520 | 
521 | if [ -d /data ]; then
522 |     check_mounted_folder_writable_with_chown "/data"
523 |     if [ -d /data/databases ]; then
524 |         check_mounted_folder_writable_with_chown "/data/databases"
525 |     fi
526 |     if [ -d /data/dbms ]; then
527 |         check_mounted_folder_writable_with_chown "/data/dbms"
528 |     fi
529 |     if [ -d /data/transactions ]; then
530 |         check_mounted_folder_writable_with_chown "/data/transactions"
531 |     fi
532 | fi
533 | 
534 | if [ -d /licenses ]; then
535 |     check_mounted_folder_readable "/licenses"
536 |     : ${NEO4J_server_directories_licenses:="/licenses"}
537 | fi
538 |
539 |
540 | # ==== LOAD PLUGINS ====
541 | 
542 | if [[ -n "${NEO4J_PLUGINS:-}" ]]; then
543 |     # NEO4J_PLUGINS should be a json array of plugins like '["graph-algorithms", "apoc", "streams", "graphql"]'
544 |     install_neo4j_plugins
545 | fi
546 | 
547 | # ==== RENAME LEGACY ENVIRONMENT CONF VARIABLES ====
548 | 
549 | # Env variable naming convention:
550 | # - prefix NEO4J_
551 | # - double underscore char '__' instead of single underscore '_' char in the setting name
552 | # - underscore char '_' instead of dot '.' char in the setting name
553 | # Example:
554 | # NEO4J_server_tx__log_rotation_retention__policy env variable to set
555 | # server.tx_log.rotation.retention_policy setting
556 | 
557 | # we only need to override the configurations with a docker specific override.
558 | # The other config renames will be taken care of inside Neo4j.
559 | : ${NEO4J_db_tx__log_rotation_retention__policy:=${NEO4J_dbms_tx__log_rotation_retention__policy:-}}  # migrate legacy names only when the new name is unset
560 | : ${NEO4J_server_memory_pagecache_size:=${NEO4J_dbms_memory_pagecache_size:-}}
561 | : ${NEO4J_server_default__listen__address:=${NEO4J_dbms_default__listen__address:-}}
562 | if [ "${NEO4J_EDITION}" == "enterprise" ];
563 | then
564 |     : ${NEO4J_server_cluster_advertised__address:=${NEO4J_causal__clustering_transaction__advertised__address:-}}
565 |     : ${NEO4J_server_cluster_raft_advertised__address:=${NEO4J_causal__clustering_raft__advertised__address:-}}
566 | fi
567 | 
568 | # ==== SET CONFIGURATIONS ====
569 | 
570 | ## == DOCKER SPECIFIC DEFAULT CONFIGURATIONS ===
571 | ## these should not override *any* configurations set by the user
572 | 
573 | debug_msg "Setting docker specific configuration overrides"
574 | add_docker_default_to_conf "server.memory.pagecache.size" "512M"
575 | add_docker_default_to_conf "server.default_listen_address" "0.0.0.0"
576 | 
577 | # set enterprise only docker defaults
578 | if [ "${NEO4J_EDITION}" == "enterprise" ];
579 | then
580 |     debug_msg "Setting docker specific Enterprise Edition overrides"
581 |     add_docker_default_to_conf "server.cluster.advertised_address" "$(hostname):6000"
582 |     add_docker_default_to_conf "server.cluster.raft.advertised_address" "$(hostname):7000"
583 |     add_docker_default_to_conf "server.routing.advertised_address" "$(hostname):7688"
584 | fi
585 |
586 | ## == ENVIRONMENT VARIABLE CONFIGURATIONS ===
587 | ## these override BOTH defaults and any existing values in the neo4j.conf file
588 | 
589 | # these are docker control envs that have the NEO4J_ prefix but we don't want to add to the config.
590 | not_configs=("NEO4J_ACCEPT_LICENSE_AGREEMENT" "NEO4J_AUTH" "NEO4J_AUTH_PATH" "NEO4J_DEBUG" "NEO4J_EDITION" \
591 |     "NEO4J_HOME" "NEO4J_PLUGINS" "NEO4J_SHA256" "NEO4J_TARBALL" "NEO4J_DEPRECATION_WARNING")
592 | 
593 | debug_msg "Applying configuration settings that have been set using environment variables."
594 | # list env variables with prefix NEO4J_ and create settings from them
595 | for i in $( set | grep ^NEO4J_ | awk -F'=' '{print $1}' | sort -rn ); do  # NOTE(review): -rn is a reverse *numeric* sort over variable names — confirm the intended ordering
596 |     if containsElement "$i" "${not_configs[@]}"; then
597 |         continue
598 |     fi
599 | 
600 |     # Skip env variables with suffix _FILE, these are docker secrets
601 |     if [[ "$i" == *"_FILE" ]]; then
602 |         continue
603 |     fi
604 | 
605 |     setting=$(echo "${i}" | sed 's|^NEO4J_||' | sed 's|_|.|g' | sed 's|\.\.|_|g')  # NEO4J_foo_bar__baz -> foo.bar_baz
606 |     value=$(echo "${!i}")
607 |     # Don't allow settings with no value or settings that start with a number (neo4j converts settings to env variables and you cannot have an env variable that starts with a number)
608 |     if [[ -n ${value} ]]; then
609 |         if [[ ! "${setting}" =~ ^[0-9]+.*$ ]]; then
610 |             add_env_setting_to_conf "${setting}" "${value}"
611 |         else
612 |             echo >&2 "WARNING: ${setting} not written to conf file. Settings that start with a number are not permitted."
613 |         fi
614 |     fi
615 | done
616 |
617 | # ==== SET PASSWORD ====
618 | 
619 | if [[ -n "${NEO4J_AUTH_PATH:-}" ]]; then
620 |     # Validate the existence of the password file
621 |     if [ ! -f "${NEO4J_AUTH_PATH}" ]; then
622 |         echo >&2 "The password file '${NEO4J_AUTH_PATH}' does not exist"
623 |         exit 1
624 |     fi
625 |     # validate the password file is readable
626 |     check_mounted_folder_readable "${NEO4J_AUTH_PATH}"  # despite the name, is_readable also works on a single file
627 | 
628 |     debug_msg "Setting initial password from file ${NEO4J_AUTH_PATH}"
629 |     set_initial_password "$(cat ${NEO4J_AUTH_PATH})"  # NOTE(review): ${NEO4J_AUTH_PATH} is unquoted here — paths containing spaces would break
630 | else
631 |     debug_msg "Setting initial password from environment"
632 |     set_initial_password "${NEO4J_AUTH:-}"
633 | fi
634 | 
635 | # ==== CLEANUP RUN FILE ====
636 | 
637 | if [ -f "${NEO4J_HOME}"/run/neo4j.pid ];
638 | then
639 |     rm "${NEO4J_HOME}"/run/neo4j.pid  # remove a stale pid file left behind by an unclean shutdown
640 | fi
641 | 
642 | # ==== INVOKE NEO4J STARTUP ====
643 | 
644 | [ -f "${EXTENSION_SCRIPT:-}" ] && . ${EXTENSION_SCRIPT}  # NOTE(review): unquoted ${EXTENSION_SCRIPT}; sourced, so it can alter any state set above
645 | 
646 | if [ "${cmd}" == "dump-config" ]; then
647 |     if [ ! -d "/conf" ]; then
648 |         echo >&2 "You must mount a folder to /conf so that the configuration file(s) can be dumped to there."
649 |         exit 1
650 |     fi
651 |     check_mounted_folder_writable_with_chown "/conf"
652 |     cp --recursive "${NEO4J_HOME}"/conf/* /conf
653 |     echo "Config Dumped"
654 |     exit 0
655 | fi
656 |
657 | # this prints out a command for us to run.
658 | # the command is something like: `java ...[lots of java options]... neo4j.mainClass ...[some neo4j options]...`
659 | # putting debug messages here causes the function to break
660 | function get_neo4j_run_cmd {
661 | 
662 |     local extra_args=()
663 | 
664 |     if [ "${EXTENDED_CONF+"yes"}" == "yes" ]; then
665 |         extra_args+=("--expand-commands")
666 |     fi
667 | 
668 |     if running_as_root; then
669 |         su-exec neo4j:neo4j neo4j console --dry-run "${extra_args[@]}"  # --dry-run prints the full java command instead of starting the server; stdout is captured by the caller
670 |     else
671 |         neo4j console --dry-run "${extra_args[@]}"
672 |     fi
673 | }
674 |
675 | if [ "${cmd}" == "neo4j" ]; then
676 |     # separate declaration and use of get_neo4j_run_cmd so that error codes are correctly surfaced
677 |     debug_msg "getting full neo4j run command"
678 |     neo4j_console_cmd="$(get_neo4j_run_cmd)"
679 |     debug_msg "${exec_cmd} ${neo4j_console_cmd}"
680 |     #%%DEPRECATION_WARNING_PLACEHOLDER%%
681 |     eval ${exec_cmd} ${neo4j_console_cmd:?No Neo4j command was generated}  # :? aborts if the command is unset OR empty; the unquoted expansions are intentional word splitting
682 | else
683 |     debug_msg "${exec_cmd}" "$@"
684 |     ${exec_cmd} "$@"
685 | fi
686 |
```
--------------------------------------------------------------------------------
/neo4j/docker-neo4j/docker-image-src/5/coredb/docker-entrypoint.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash -eu
2 | 
3 | cmd="$1"  # the container's first argument, used later in the script to select the entrypoint behaviour
4 | 
5 | # load useful utility functions
6 | . /startup/utilities.sh
7 |
8 | function is_readable  # returns 0 if ${1} is readable by ${userid} or any of ${groups[@]}
9 | {
10 |     # this code is fairly ugly but works no matter who this script is running as.
11 |     # It would be nice if the writability tests could use this logic somehow.
12 |     local _file=${1}
13 |     perm=$(stat -c %a "${_file}")
14 | 
15 |     # everyone permission
16 |     if [[ ${perm:2:1} -ge 4 ]]; then
17 |         return 0
18 |     fi
19 |     # owner permissions
20 |     if [[ ${perm:0:1} -ge 4 ]]; then
21 |         if [[ "$(stat -c %U "${_file}")" = "${userid}" ]] || [[ "$(stat -c %u "${_file}")" = "${userid}" ]]; then
22 |             return 0
23 |         fi
24 |     fi
25 |     # group permissions
26 |     if [[ ${perm:1:1} -ge 4 ]]; then
27 |         if containsElement "$(stat -c %g "${_file}")" "${groups[@]}" || containsElement "$(stat -c %G "${_file}")" "${groups[@]}" ; then
28 |             return 0
29 |         fi
30 |     fi
31 |     return 1
32 | }
33 |
34 | function is_writable  # returns 0 if ${1} is writable by ${userid} or any of ${groups[@]}
35 | {
36 |     # It would be nice if this and the is_readable function could combine somehow
37 |     local _file=${1}
38 |     perm=$(stat -c %a "${_file}")
39 | 
40 |     # everyone permission
41 |     if containsElement ${perm:2:1} 2 3 6 7; then
42 |         return 0
43 |     fi
44 |     # owner permissions
45 |     if containsElement ${perm:0:1} 2 3 6 7; then
46 |         if [[ "$(stat -c %U "${_file}")" = "${userid}" ]] || [[ "$(stat -c %u "${_file}")" = "${userid}" ]]; then
47 |             return 0
48 |         fi
49 |     fi
50 |     # group permissions
51 |     if containsElement ${perm:1:1} 2 3 6 7; then
52 |         if containsElement "$(stat -c %g "${_file}")" "${groups[@]}" || containsElement "$(stat -c %G "${_file}")" "${groups[@]}" ; then
53 |             return 0
54 |         fi
55 |     fi
56 |     return 1
57 | }
58 |
59 | function check_mounted_folder_readable  # fail fast (with permissions advice) if a mounted path is not readable
60 | {
61 |     local _directory=${1}
62 |     debug_msg "checking ${_directory} is readable"
63 |     if ! is_readable "${_directory}"; then
64 |         print_permissions_advice_and_fail "${_directory}" "${userid}" "${groupid}"  # presumably exits the script; defined in /startup/utilities.sh — TODO confirm
65 |     fi
66 | }
67 |
68 | function check_mounted_folder_writable_with_chown  # ensure a mounted folder is writable; chown it to neo4j when running as root, else fail with advice
69 | {
70 |     # The /data and /log directory are a bit different because they are very likely to be mounted by the user but not
71 |     # necessarily writable.
72 |     # This depends on whether a user ID is passed to the container and which folders are mounted.
73 |     #
74 |     # No user ID passed to container:
75 |     # 1) No folders are mounted.
76 |     # The /data and /log folder are owned by neo4j by default, so should be writable already.
77 |     # 2) Both /log and /data are mounted.
78 |     # This means on start up, /data and /logs are owned by an unknown user and we should chown them to neo4j for
79 |     # backwards compatibility.
80 |     #
81 |     # User ID passed to container:
82 |     # 1) Both /data and /logs are mounted
83 |     # The /data and /logs folders are owned by an unknown user but we *should* have rw permission to them.
84 |     # That should be verified and error (helpfully) if not.
85 |     # 2) User mounts /data or /logs *but not both*
86 |     # The unmounted folder is still owned by neo4j, which should already be writable. The mounted folder should
87 |     # have rw permissions through user id. This should be verified.
88 |     # 3) No folders are mounted.
89 |     # The /data and /log folder are owned by neo4j by default, and these are already writable by the user.
90 |     # (This is a very unlikely use case).
91 | 
92 |     local mountFolder=${1}
93 |     debug_msg "checking ${mountFolder} is writable"
94 |     if running_as_root && ! secure_mode_enabled; then
95 |         # check folder permissions
96 |         if ! is_writable "${mountFolder}" ; then
97 |             # warn that we're about to chown the folder and then chown it
98 |             echo "Warning: Folder mounted to \"${mountFolder}\" is not writable from inside container. Changing folder owner to ${userid}."
99 |             chown -R "${userid}":"${groupid}" "${mountFolder}"
100 |         # check permissions on files in the folder
101 |         elif [ "$(su-exec "${userid}":"${groupid}" find "${mountFolder}" -not -writable | wc -l)" -gt 0 ]; then
102 |             echo "Warning: Some files inside \"${mountFolder}\" are not writable from inside container. Changing folder owner to ${userid}."
103 |             chown -R "${userid}":"${groupid}" "${mountFolder}"
104 |         fi
105 |     else
106 |         if [[ ! -w "${mountFolder}" ]] && [[ "$(stat -c %U "${mountFolder}")" != "neo4j" ]]; then
107 |             print_permissions_advice_and_fail "${mountFolder}" "${userid}" "${groupid}"
108 |         fi
109 |     fi
110 | }
111 |
function load_plugin_from_location
{
    # Install a plugin from a location already inside the container at runtime.
    # Arguments: $1 - plugin name used for the installed jar, e.g. apoc
    #            $2 - source location; may be a glob, e.g. /var/lib/neo4j/labs/apoc-*.jar
    # Exits 1 if the installed jar ends up unreadable.
    local _plugin_name="${1}"
    local _location="${2}"

    # prefer a mounted /plugins folder over the one inside NEO4J_HOME
    local _plugins_dir="${NEO4J_HOME}/plugins"
    if [ -d /plugins ]; then
        local _plugins_dir="/plugins"
    fi

    # Using the same name irrespective of version ensures we don't end up with
    # several versions of the same plugin installed side by side.
    local _destination="${_plugins_dir}/${_plugin_name}.jar"

    # Now we install the plugin that is shipped with Neo4j.
    # ${_location} is deliberately unquoted so glob patterns expand.
    for filename in ${_location}; do
        echo "Installing Plugin '${_plugin_name}' from ${_location} to ${_destination}"
        # bug fix: copy the matched file itself; the loop variable was not
        # being used as the copy source before
        cp --preserve "${filename}" "${_destination}"
        chmod +rw "${_destination}"
    done

    if ! is_readable "${_destination}"; then
        echo >&2 "Plugin at '${_destination}' is not readable"
        exit 1
    fi
}
137 |
function load_plugin_from_url
{
    # Load a plugin at runtime. The provided github repository must have a versions.json on the master branch with the
    # correct format.
    # Globals:   NEO4J_HOME (read), NEO4J_EDITION (read, error message only)
    # Arguments: $1 - plugin name; must be a key in /startup/neo4j-plugins.json
    # Returns:   1 (without exiting) when the compatibility data cannot be
    #            fetched, so that Neo4j can still start without the plugin.
    #            Exits 1 if the downloaded jar is not readable.
    local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graph-ql

    # prefer a mounted /plugins folder over the one inside NEO4J_HOME
    local _plugins_dir="${NEO4J_HOME}/plugins"
    if [ -d /plugins ]; then
        local _plugins_dir="/plugins"
    fi
    # look up where this plugin's versions.json lives
    local _versions_json_url="$(jq --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.versions" /startup/neo4j-plugins.json )"
    debug_msg "Will read ${_plugin_name} versions.json from ${_versions_json_url}"
    # Using the same name for the plugin irrespective of version ensures we don't end up with different versions of the same plugin
    local _destination="${_plugins_dir}/${_plugin_name}.jar"
    local _neo4j_version="$(neo4j --version | cut -d' ' -f2)"

    # Now we call out to github to get the versions.json for this plugin and we parse that to find the url for the correct plugin jar for our neo4j version
    echo "Fetching versions.json for Plugin '${_plugin_name}' from ${_versions_json_url}"
    local _versions_json
    if ! _versions_json="$(wget -q --timeout 300 --tries 30 -O - "${_versions_json_url}")"; then
        debug_msg "ERROR: could not fetch '${_versions_json}'"
        echo >&2 "ERROR: could not query ${_versions_json_url} for plugin compatibility information.
    This could indicate a problem with your network or this container's network settings.
    Neo4j will continue to start, but \"${_plugin_name}\" will not be loaded."
        return 1
    fi
    # select the entry for the lowest neo4j version that is semver-compatible
    # with ours (semver helper module lives in /startup, hence -L/startup)
    local _plugin_jar_url="$(echo "${_versions_json}" | jq -L/startup --raw-output "import \"semver\" as lib; [ .[] | select(.neo4j|lib::semver(\"${_neo4j_version}\")) ] | min_by(.neo4j) | .jar")"
    if [[ -z "${_plugin_jar_url}" ]] || [[ "${_plugin_jar_url}" == "null" ]]; then
        debug_msg "ERROR: '${_versions_json_url}' does not contain an entry for ${_neo4j_version}"
        echo >&2 "ERROR: No compatible \"${_plugin_name}\" plugin found for Neo4j ${_neo4j_version} ${NEO4J_EDITION}.
    This can happen with the newest Neo4j versions when a compatible plugin has not yet been released.
    You can either use an older version of Neo4j, or continue without ${_plugin_name}.
    Neo4j will continue to start, but \"${_plugin_name}\" will not be loaded."
    else
        echo "Installing Plugin '${_plugin_name}' from ${_plugin_jar_url} to ${_destination} "
        wget -q --timeout 300 --tries 30 --output-document="${_destination}" "${_plugin_jar_url}"

        if ! is_readable "${_destination}"; then
            echo >&2 "Plugin at '${_destination}' is not readable"
            exit 1
        fi
    fi
}
181 |
function apply_plugin_default_configuration
{
    # Apply the default configuration properties a plugin requires (the
    # "properties" map for that plugin in /startup/neo4j-plugins.json) to
    # neo4j.conf, without overriding anything the user set themselves.
    # Arguments: $1 - plugin name, e.g. apoc, graph-algorithms, graphql
    #            $2 - reference conf: a pre-modification copy of the config,
    #                 used to detect user-set values that must not be changed
    local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graphql
    local _reference_conf="${2}" # used to determine if we can override properties
    local _neo4j_conf="${NEO4J_HOME}/conf/neo4j.conf"

    local _property _value
    echo "Applying default values for plugin ${_plugin_name} to neo4j.conf"
    # each _entry is one compact {"key":...,"value":...} json object
    for _entry in $(jq --compact-output --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.properties | to_entries[]" /startup/neo4j-plugins.json); do
        _property="$(jq --raw-output '.key' <<< "${_entry}")"
        _value="$(jq --raw-output '.value' <<< "${_entry}")"
        debug_msg "${_plugin_name} requires setting ${_property}=${_value}"

        # the first grep strips out comments
        if grep -o "^[^#]*" "${_reference_conf}" | grep -q --fixed-strings "${_property}=" ; then
            # property is already set in the user provided config. In this case we don't override what has been set explicitly by the user.
            echo "Skipping ${_property} for plugin ${_plugin_name} because it is already set."
            echo "You may need to add ${_value} to the ${_property} setting in your configuration file."
        else
            if grep -o "^[^#]*" "${_neo4j_conf}" | grep -q --fixed-strings "${_property}=" ; then
                # NOTE(review): _property/_value are interpolated into the sed
                # expression unescaped — values containing sed metacharacters
                # (e.g. '/' or '&') would break this; assumed safe for the
                # values shipped in neo4j-plugins.json. TODO confirm.
                sed --in-place "s/${_property}=/&${_value},/" "${_neo4j_conf}"
                debug_msg "${_property} was already in the configuration file, so ${_value} was added to it."
            else
                echo -e "\n${_property}=${_value}" >> "${_neo4j_conf}"
                debug_msg "${_property}=${_value} has been added to the configuration file."
            fi
        fi
    done
}
213 |
function install_neo4j_plugins
{
    # Validate and install every plugin requested via NEO4J_PLUGINS (a json
    # array of plugin names). Plugins shipped inside the image are copied into
    # place; anything else is downloaded at runtime.
    # Globals:   NEO4J_PLUGINS (read), NEO4J_HOME (read)
    # Exits 1 if an unknown plugin name is requested.

    # first verify that the requested plugins are valid.
    debug_msg "One or more NEO4J_PLUGINS have been requested."
    local _known_plugins=($(jq --raw-output "keys[]" /startup/neo4j-plugins.json))
    debug_msg "Checking requested plugins are known and can be installed."
    for plugin_name in $(echo "${NEO4J_PLUGINS}" | jq --raw-output '.[]'); do
        if ! containsElement "${plugin_name}" "${_known_plugins[@]}"; then
            printf >&2 "\"%s\" is not a known Neo4j plugin. Options are:\n%s" "${plugin_name}" "$(jq --raw-output "keys[1:][]" /startup/neo4j-plugins.json)"
            exit 1
        fi
    done

    # We store a copy of the config before we modify it for the plugins to allow us to see if there are user-set values in the input config that we shouldn't override
    local _old_config="$(mktemp)"
    if [ -e "${NEO4J_HOME}"/conf/neo4j.conf ]; then
        cp "${NEO4J_HOME}"/conf/neo4j.conf "${_old_config}"
    else
        touch "${NEO4J_HOME}"/conf/neo4j.conf
        touch "${_old_config}"
    fi
    for plugin_name in $(echo "${NEO4J_PLUGINS}" | jq --raw-output '.[]'); do
        debug_msg "Plugin ${plugin_name} has been requested"
        local _location="$(jq --raw-output "with_entries( select(.key==\"${plugin_name}\") ) | to_entries[] | .value.location" /startup/neo4j-plugins.json )"
        # [[ ... && ... ]] instead of the deprecated, ambiguous [ ... -a ... ].
        # The inner subshell glob-expands _location to confirm that the jar
        # actually exists inside the image.
        if [[ "${_location}" != "null" && -n "$(shopt -s nullglob; echo ${_location})" ]]; then
            debug_msg "$plugin_name is already in the container at ${_location}"
            load_plugin_from_location "${plugin_name}" "${_location}"
            debug_msg "Applying plugin specific configurations."
            apply_plugin_default_configuration "${plugin_name}" "${_old_config}"
        else
            debug_msg "$plugin_name must be downloaded."
            if load_plugin_from_url "${plugin_name}"; then
                debug_msg "Applying plugin specific configurations."
                apply_plugin_default_configuration "${plugin_name}" "${_old_config}"
            fi
        fi
    done
    rm "${_old_config}"
}
253 |
function add_docker_default_to_conf
{
    # Append a docker-specific default setting to neo4j.conf, but only if the
    # setting is not already present — docker defaults should NOT overwrite
    # values already in the conf file.
    # Globals:   NEO4J_HOME (read)
    # Arguments: $1 - setting name, e.g. server.default_listen_address
    #            $2 - value
    local _setting="${1}"
    local _value="${2}"

    if [ ! -e "${NEO4J_HOME}"/conf/neo4j.conf ] || ! grep -q "^${_setting}=" "${NEO4J_HOME}"/conf/neo4j.conf
    then
        debug_msg "Appended ${_setting}=${_value} to ${NEO4J_HOME}/conf/neo4j.conf"
        # printf with quoted arguments avoids the word-splitting/globbing that
        # the previous unquoted echo expansion was subject to
        printf '\n%s=%s\n' "${_setting}" "${_value}" >> "${NEO4J_HOME}"/conf/neo4j.conf
    fi
}
266 |
function add_env_setting_to_conf
{
    # Write a setting derived from an environment variable into the right
    # configuration file. Unlike docker defaults, environment variables are
    # meant to overwrite values already present in the conf.
    local _key=${1}
    local _val=${2}
    local _target_conf
    local _appendable_settings=("server.jvm.additional")

    # different settings need to go in different files now; route by the
    # first dot-separated component of the setting name
    if [[ "${_key%%.*}" == "apoc" ]]; then
        _target_conf="${NEO4J_HOME}"/conf/apoc.conf
    else
        _target_conf="${NEO4J_HOME}"/conf/neo4j.conf
    fi

    # If the setting already exists, drop the old line first — unless it is
    # one of the settings where multiple entries may accumulate.
    if [ -e "${_target_conf}" ] && grep -q -F "${_key}=" "${_target_conf}"; then
        if containsElement "${_key}" "${_appendable_settings[@]}"; then
            debug_msg "${_key} will be appended to ${_target_conf} without replacing existing settings."
        else
            debug_msg "Removing existing setting for ${_key} in ${_target_conf}"
            sed --in-place "/^${_key}=.*/d" "${_target_conf}"
        fi
    fi
    # Then always append setting to file
    debug_msg "Appended ${_key}=${_val} to ${_target_conf}"
    echo "${_key}=${_val}" >> "${_target_conf}"
}
298 |
function set_initial_password
{
    # Configure neo4j authentication from a NEO4J_AUTH-style value.
    # Accepted forms:
    #   "none"                   - disable authentication entirely
    #   "<user>/<password>"      - set the initial password (user must be neo4j)
    #   "<user>/<password>/true" - as above, but require a change on first login
    # Globals:   cmd, NEO4J_HOME, neo4j_admin_cmd, userid, groupid,
    #            EXTENDED_CONF (all read)
    # Arguments: $1 - the auth value (may be empty, meaning: leave defaults)
    # Exits 1 on invalid user, forbidden password, or unparseable value.
    local _neo4j_auth="${1}"

    # set the neo4j initial password only if you run the database server
    if [ "${cmd}" == "neo4j" ]; then
        if [ "${_neo4j_auth:-}" == "none" ]; then
            debug_msg "Authentication is requested to be unset"
            add_env_setting_to_conf "dbms.security.auth_enabled" "false"
        elif [[ "${_neo4j_auth:-}" =~ ^([^/]+)\/([^/]+)/?([tT][rR][uU][eE])?$ ]]; then
            admin_user="${BASH_REMATCH[1]}"
            password="${BASH_REMATCH[2]}"
            # NOTE(review): the regex accepts any capitalisation of "true",
            # but only lowercase "true" triggers the reset below — TODO confirm
            # this is intended.
            do_reset="${BASH_REMATCH[3]}"

            if [ "${password}" == "neo4j" ]; then
                echo >&2 "Invalid value for password. It cannot be 'neo4j', which is the default."
                exit 1
            fi
            if [ "${admin_user}" != "neo4j" ]; then
                echo >&2 "Invalid admin username, it must be neo4j."
                exit 1
            fi

            # this line has an inbuilt assumption that any configuration settings from the environment have already been applied to neo4j.conf
            local _min_password_length=$(cat "${NEO4J_HOME}"/conf/neo4j.conf | grep dbms.security.auth_minimum_password_length | sed -E 's/.*=(.*)/\1/')
            # NOTE(review): a too-short password only prints this warning and
            # continues; neo4j itself is left to reject it at startup.
            if [ "${#password}" -lt "${_min_password_length:-"8"}" ]; then
                echo >&2 "Invalid value for password. The minimum password length is 8 characters.
If Neo4j fails to start, you can:
  1) Use a stronger password.
  2) Set configuration dbms.security.auth_minimum_password_length to override the minimum password length requirement.
  3) Set environment variable NEO4J_dbms_security_auth__minimum__password__length to override the minimum password length requirement."
            fi

            if running_as_root; then
                # running set-initial-password as root will create subfolders to /data as root, causing startup fail when neo4j can't read or write the /data/dbms folder
                # creating the folder first will avoid that
                mkdir -p /data/dbms
                debug_msg "Making sure /data/dbms is owned by ${userid}:${groupid}"
                chown "${userid}":"${groupid}" /data/dbms
            fi

            # assemble optional flags for neo4j-admin
            local extra_args=()
            if [ "${do_reset}" == "true" ]; then
                extra_args+=("--require-password-change")
            fi
            if [ "${EXTENDED_CONF+"yes"}" == "yes" ]; then
                extra_args+=("--expand-commands")
            fi
            if debugging_enabled; then
                extra_args+=("--verbose")
            fi
            debug_msg "Setting initial password"
            # the password itself is masked in the debug output
            debug_msg "${neo4j_admin_cmd} dbms set-initial-password ***** ${extra_args[*]}"
            ${neo4j_admin_cmd} dbms set-initial-password "${password}" "${extra_args[@]}"

        elif [ -n "${_neo4j_auth:-}" ]; then
            echo "$_neo4j_auth is invalid"
            echo >&2 "Invalid value for NEO4J_AUTH: '${_neo4j_auth}'"
            exit 1
        fi
    fi
}
361 |
# ==== CODE STARTS ====
debug_msg "DEBUGGING ENABLED"

# If we're running as root, then run as the neo4j user. Otherwise
# docker is running with --user and we simply use that user. Note
# that su-exec, despite its name, does not replicate the functionality
# of exec, so we need to use both
if running_as_root; then
    userid="neo4j"
    groupid="neo4j"
    groups=($(id -G neo4j))
    exec_cmd="exec su-exec neo4j:neo4j"
    neo4j_admin_cmd="su-exec neo4j:neo4j neo4j-admin"
    debug_msg "Running as root user inside neo4j image"
else
    userid="$(id -u)"
    groupid="$(id -g)"
    groups=($(id -G))
    exec_cmd="exec"
    neo4j_admin_cmd="neo4j-admin"
    debug_msg "Running as user ${userid}:${groupid} inside neo4j image"
fi
# freeze the identity and launch settings for the rest of the script
readonly userid
readonly groupid
readonly groups
readonly exec_cmd
readonly neo4j_admin_cmd

# Need to chown the home directory
if running_as_root; then
    debug_msg "chowning ${NEO4J_HOME} recursively to ${userid}":"${groupid}"
    chown -R "${userid}":"${groupid}" "${NEO4J_HOME}"
    chmod 700 "${NEO4J_HOME}"
    # lock down the home directory and each of its top-level folders
    find "${NEO4J_HOME}" -mindepth 1 -maxdepth 1 -type d -exec chmod -R 700 {} \;
    debug_msg "Setting all files in ${NEO4J_HOME}/conf to permissions 600"
    find "${NEO4J_HOME}"/conf -type f -exec chmod -R 600 {} \;
fi
399 |
## == EXTRACT SECRETS FROM FILES ===
# These environment variables are set by using docker secrets and they override their equivalent env vars
# They are suffixed with _FILE and prefixed by the name of the env var they should override
# e.g. NEO4J_AUTH_FILE will override the value of the NEO4J_AUTH
# It's best to do this first so that the secrets are available for the rest of the script
# NOTE(review): parsing printenv line-by-line misidentifies names when an env
# value contains a newline — assumed not to happen for NEO4J_*_FILE variables.
for variable_name in $(printenv | awk -F= '{print $1}'); do
    # Check if the variable ends with "_FILE" and starts with "NEO4J_"
    if [[ $variable_name == *"_FILE" &&
          $variable_name == "NEO4J_"* ]]; then
        # Create a new variable name by removing the "_FILE" suffix
        base_variable_name=${variable_name%_FILE}

        # Get the value of the _FILE variable (indirect expansion)
        secret_file_path="${!variable_name}"

        if is_readable "${secret_file_path}"; then
            # Read the secret value from the file
            secret_value=$(<"$secret_file_path")
        else
            # File not readable
            echo >&2 "The secret file '$secret_file_path' does not exist or is not readable. Make sure you have correctly configured docker secrets."
            exit 1
        fi
        # Assign the value to the new variable
        export "$base_variable_name"="$secret_value"
    fi
done
427 |
# ==== CHECK LICENSE AGREEMENT ====

# Only prompt for license agreement if command contains "neo4j" in it
if [[ "${cmd}" == *"neo4j"* ]]; then
    if [ "${NEO4J_EDITION}" == "enterprise" ]; then
        # default to "not accepted" unless the user set the variable themselves
        : ${NEO4J_ACCEPT_LICENSE_AGREEMENT:="not accepted"}
        if [[ "$NEO4J_ACCEPT_LICENSE_AGREEMENT" != "yes" && "$NEO4J_ACCEPT_LICENSE_AGREEMENT" != "eval" ]]; then
            echo >&2 "
In order to use Neo4j Enterprise Edition you must accept the license agreement.

The license agreement is available at https://neo4j.com/terms/licensing/
If you have a support contract the following terms apply https://neo4j.com/terms/support-terms/

If you do not have a commercial license and want to evaluate the Software
please read the terms of the evaluation agreement before you accept.
https://neo4j.com/terms/enterprise_us/

(c) Neo4j Sweden AB. All Rights Reserved.
Use of this Software without a proper commercial license, or evaluation license
with Neo4j, Inc. or its affiliates is prohibited.
Neo4j has the right to terminate your usage if you are not compliant.

More information is also available at: https://neo4j.com/licensing/
If you have further inquiries about licensing, please contact us via https://neo4j.com/contact-us/

To accept the commercial license agreement set the environment variable
NEO4J_ACCEPT_LICENSE_AGREEMENT=yes

To accept the terms of the evaluation agreement set the environment variable
NEO4J_ACCEPT_LICENSE_AGREEMENT=eval

To do this you can use the following docker argument:

        --env=NEO4J_ACCEPT_LICENSE_AGREEMENT=<yes|eval>
"
            exit 1
        fi
    fi
fi

# NEO4JLABS_PLUGINS has been renamed to NEO4J_PLUGINS, but we want the old name to work for now.
if [ -n "${NEO4JLABS_PLUGINS:-}" ];
then
    echo >&2 "NEO4JLABS_PLUGINS has been renamed to NEO4J_PLUGINS since Neo4j 5.0.0.
The old name will still work, but is likely to be deprecated in future releases."
    # only adopt the legacy value if NEO4J_PLUGINS was not set explicitly
    : ${NEO4J_PLUGINS:=${NEO4JLABS_PLUGINS:-}}
fi
475 |
# ==== CHECK FILE PERMISSIONS ON MOUNTED FOLDERS ====


# /conf contents are copied (not linked) into NEO4J_HOME so the container
# never writes back into the user's mount
if [ -d /conf ]; then
    check_mounted_folder_readable "/conf"
    rm -rf "${NEO4J_HOME}"/conf/*
    debug_msg "Copying contents of /conf to ${NEO4J_HOME}/conf/*"
    find /conf -type f -exec cp --preserve=ownership,mode {} "${NEO4J_HOME}"/conf \;
fi

# /ssl replaces the certificates folder via a symlink
if [ -d /ssl ]; then
    check_mounted_folder_readable "/ssl"
    rm -rf "${NEO4J_HOME}"/certificates
    ln -s /ssl "${NEO4J_HOME}"/certificates
fi

if [ -d /plugins ]; then
    if [[ -n "${NEO4J_PLUGINS:-}" ]]; then
        # We need write permissions to write the required plugins to /plugins
        debug_msg "Extra plugins were requested. Ensuring the mounted /plugins folder has the required write permissions."
        check_mounted_folder_writable_with_chown "/plugins"
    fi
    check_mounted_folder_readable "/plugins"
    # ': ${var:=default}' only sets the directory override if the user has not
    # already configured it via environment variable
    : ${NEO4J_server_directories_plugins:="/plugins"}
fi

if [ -d /import ]; then
    check_mounted_folder_readable "/import"
    : ${NEO4J_server_directories_import:="/import"}
fi

if [ -d /metrics ]; then
    # metrics is enterprise only
    if [ "${NEO4J_EDITION}" == "enterprise" ];
    then
        check_mounted_folder_writable_with_chown "/metrics"
        : ${NEO4J_server_directories_metrics:="/metrics"}
    fi
fi

if [ -d /logs ]; then
    check_mounted_folder_writable_with_chown "/logs"
    : ${NEO4J_server_directories_logs:="/logs"}
fi

if [ -d /data ]; then
    check_mounted_folder_writable_with_chown "/data"
    # the well-known subfolders must be individually writable too
    if [ -d /data/databases ]; then
        check_mounted_folder_writable_with_chown "/data/databases"
    fi
    if [ -d /data/dbms ]; then
        check_mounted_folder_writable_with_chown "/data/dbms"
    fi
    if [ -d /data/transactions ]; then
        check_mounted_folder_writable_with_chown "/data/transactions"
    fi
fi

if [ -d /licenses ]; then
    check_mounted_folder_readable "/licenses"
    : ${NEO4J_server_directories_licenses:="/licenses"}
fi
538 |
539 |
540 | # ==== LOAD PLUGINS ====
541 |
542 | if [[ -n "${NEO4J_PLUGINS:-}" ]]; then
543 | # NEO4J_PLUGINS should be a json array of plugins like '["graph-algorithms", "apoc", "streams", "graphql"]'
544 | install_neo4j_plugins
545 | fi
546 |
547 | # ==== RENAME LEGACY ENVIRONMENT CONF VARIABLES ====
548 |
549 | # Env variable naming convention:
550 | # - prefix NEO4J_
551 | # - double underscore char '__' instead of single underscore '_' char in the setting name
552 | # - underscore char '_' instead of dot '.' char in the setting name
553 | # Example:
554 | # NEO4J_server_tx__log_rotation_retention__policy env variable to set
555 | # server.tx_log.rotation.retention_policy setting
556 |
557 | # we only need to override the configurations with a docker specific override.
558 | # The other config renames will be taken care of inside Neo4j.
559 | : ${NEO4J_db_tx__log_rotation_retention__policy:=${NEO4J_dbms_tx__log_rotation_retention__policy:-}}
560 | : ${NEO4J_server_memory_pagecache_size:=${NEO4J_dbms_memory_pagecache_size:-}}
561 | : ${NEO4J_server_default__listen__address:=${NEO4J_dbms_default__listen__address:-}}
562 | if [ "${NEO4J_EDITION}" == "enterprise" ];
563 | then
564 | : ${NEO4J_server_discovery_advertised__address:=${NEO4J_causal__clustering_discovery__advertised__address:-}}
565 | : ${NEO4J_server_cluster_advertised__address:=${NEO4J_causal__clustering_transaction__advertised__address:-}}
566 | : ${NEO4J_server_cluster_raft_advertised__address:=${NEO4J_causal__clustering_raft__advertised__address:-}}
567 | fi
568 |
# ==== SET CONFIGURATIONS ====

## == DOCKER SPECIFIC DEFAULT CONFIGURATIONS ===
## these should not override *any* configurations set by the user

debug_msg "Setting docker specific configuration overrides"
add_docker_default_to_conf "server.memory.pagecache.size" "512M"
add_docker_default_to_conf "server.default_listen_address" "0.0.0.0"

# set enterprise only docker defaults
if [ "${NEO4J_EDITION}" == "enterprise" ];
then
    debug_msg "Setting docker specific Enterprise Edition overrides"
    add_docker_default_to_conf "server.discovery.advertised_address" "$(hostname):5000"
    add_docker_default_to_conf "server.cluster.advertised_address" "$(hostname):6000"
    add_docker_default_to_conf "server.cluster.raft.advertised_address" "$(hostname):7000"
    add_docker_default_to_conf "server.routing.advertised_address" "$(hostname):7688"
fi

## == ENVIRONMENT VARIABLE CONFIGURATIONS ===
## these override BOTH defaults and any existing values in the neo4j.conf file

# these are docker control envs that have the NEO4J_ prefix but we don't want to add to the config.
not_configs=("NEO4J_ACCEPT_LICENSE_AGREEMENT" "NEO4J_AUTH" "NEO4J_AUTH_PATH" "NEO4J_DEBUG" "NEO4J_EDITION" \
             "NEO4J_HOME" "NEO4J_PLUGINS" "NEO4J_SHA256" "NEO4J_TARBALL" "NEO4J_DEPRECATION_WARNING")

debug_msg "Applying configuration settings that have been set using environment variables."
# list env variables with prefix NEO4J_ and create settings from them
# NOTE(review): 'sort -rn' numerically sorts the variable *names*; presumably
# only a stable ordering is required here — confirm before changing.
for i in $( set | grep ^NEO4J_ | awk -F'=' '{print $1}' | sort -rn ); do
    if containsElement "$i" "${not_configs[@]}"; then
        continue
    fi

    # Skip env variables with suffix _FILE, these are docker secrets
    if [[ "$i" == *"_FILE" ]]; then
        continue
    fi

    # translate the env var name back into a setting name:
    # strip NEO4J_, then '_' -> '.', then the resulting '..' (from '__') -> '_'
    setting=$(echo "${i}" | sed 's|^NEO4J_||' | sed 's|_|.|g' | sed 's|\.\.|_|g')
    value=$(echo "${!i}")
    # Don't allow settings with no value or settings that start with a number (neo4j converts settings to env variables and you cannot have an env variable that starts with a number)
    if [[ -n ${value} ]]; then
        if [[ ! "${setting}" =~ ^[0-9]+.*$ ]]; then
            add_env_setting_to_conf "${setting}" "${value}"
        else
            echo >&2 "WARNING: ${setting} not written to conf file. Settings that start with a number are not permitted."
        fi
    fi
done
618 |
# ==== SET PASSWORD ====

if [[ -n "${NEO4J_AUTH_PATH:-}" ]]; then
    # Validate the existence of the password file
    if [ ! -f "${NEO4J_AUTH_PATH}" ]; then
        echo >&2 "The password file '${NEO4J_AUTH_PATH}' does not exist"
        exit 1
    fi
    # validate the password file is readable
    check_mounted_folder_readable "${NEO4J_AUTH_PATH}"

    debug_msg "Setting initial password from file ${NEO4J_AUTH_PATH}"
    # quoted so paths containing spaces do not break the cat
    set_initial_password "$(cat "${NEO4J_AUTH_PATH}")"
else
    debug_msg "Setting initial password from environment"
    set_initial_password "${NEO4J_AUTH:-}"
fi

# ==== CLEANUP RUN FILE ====

# remove any stale pid file left behind by a previous run of this container
if [ -f "${NEO4J_HOME}"/run/neo4j.pid ];
then
    rm "${NEO4J_HOME}"/run/neo4j.pid
fi

# ==== INVOKE NEO4J STARTUP ====

# source the optional extension script if one was provided
# (quoted so script paths containing spaces work)
[ -f "${EXTENSION_SCRIPT:-}" ] && . "${EXTENSION_SCRIPT}"

if [ "${cmd}" == "dump-config" ]; then
    if [ ! -d "/conf" ]; then
        echo >&2 "You must mount a folder to /conf so that the configuration file(s) can be dumped to there."
        exit 1
    fi
    check_mounted_folder_writable_with_chown "/conf"
    cp --recursive "${NEO4J_HOME}"/conf/* /conf
    echo "Config Dumped"
    exit 0
fi
658 |
# this prints out a command for us to run.
# the command is something like: `java ...[lots of java options]... neo4j.mainClass ...[some neo4j options]...`
# putting debug messages here causes the function to break
function get_neo4j_run_cmd {
    # Uses `neo4j console --dry-run` to obtain the full java command line that
    # would start the server, writing it to stdout for the caller to capture.
    # Any other output here would corrupt the captured command — hence no
    # debug_msg calls inside this function.

    local extra_args=()

    # EXTENDED_CONF being set (to anything) enables command expansion in config
    if [ "${EXTENDED_CONF+"yes"}" == "yes" ]; then
        extra_args+=("--expand-commands")
    fi

    if running_as_root; then
        su-exec neo4j:neo4j neo4j console --dry-run "${extra_args[@]}"
    else
        neo4j console --dry-run "${extra_args[@]}"
    fi
}
676 |
if [ "${cmd}" == "neo4j" ]; then
    # separate declaration and use of get_neo4j_run_cmd so that error codes are correctly surfaced
    debug_msg "getting full neo4j run command"
    neo4j_console_cmd="$(get_neo4j_run_cmd)"
    debug_msg "${exec_cmd} ${neo4j_console_cmd}"
    #%%DEPRECATION_WARNING_PLACEHOLDER%%
    # ${var:?msg} (fixing the previous ${var?:msg} typo) aborts with the
    # message when the dry-run produced no command at all; the expansions are
    # deliberately unquoted so eval word-splits the assembled command line
    eval ${exec_cmd} ${neo4j_console_cmd:?No Neo4j command was generated}
else
    debug_msg "${exec_cmd}" "$@"
    ${exec_cmd} "$@"
fi
688 |
```