# Codebase snapshot — datalayer/jupyter-mcp-server (page 4 of 6)

Token budget: 42165/50000 · files on this page: 5/135
This is page 4 of 6. Use http://codebase.md/datalayer/jupyter-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .github
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   └── workflows
│       ├── build.yml
│       ├── fix-license-header.yml
│       ├── lint.sh
│       ├── prep-release.yml
│       ├── publish-release.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .licenserc.yaml
├── .pre-commit-config.yaml
├── .vscode
│   ├── mcp.json
│   └── settings.json
├── ARCHITECTURE.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── dev
│   ├── content
│   │   ├── new.ipynb
│   │   ├── notebook.ipynb
│   │   └── README.md
│   └── README.md
├── Dockerfile
├── docs
│   ├── .gitignore
│   ├── .yarnrc.yml
│   ├── babel.config.js
│   ├── docs
│   │   ├── _category_.yaml
│   │   ├── clients
│   │   │   ├── _category_.yaml
│   │   │   ├── claude_desktop
│   │   │   │   ├── _category_.yaml
│   │   │   │   └── index.mdx
│   │   │   ├── cline
│   │   │   │   ├── _category_.yaml
│   │   │   │   └── index.mdx
│   │   │   ├── cursor
│   │   │   │   ├── _category_.yaml
│   │   │   │   └── index.mdx
│   │   │   ├── index.mdx
│   │   │   ├── vscode
│   │   │   │   ├── _category_.yaml
│   │   │   │   └── index.mdx
│   │   │   └── windsurf
│   │   │       ├── _category_.yaml
│   │   │       └── index.mdx
│   │   ├── configure
│   │   │   ├── _category_.yaml
│   │   │   └── index.mdx
│   │   ├── contribute
│   │   │   ├── _category_.yaml
│   │   │   └── index.mdx
│   │   ├── deployment
│   │   │   ├── _category_.yaml
│   │   │   ├── datalayer
│   │   │   │   ├── _category_.yaml
│   │   │   │   └── streamable-http
│   │   │   │       └── index.mdx
│   │   │   ├── index.mdx
│   │   │   └── jupyter
│   │   │       ├── _category_.yaml
│   │   │       ├── index.mdx
│   │   │       ├── stdio
│   │   │       │   ├── _category_.yaml
│   │   │       │   └── index.mdx
│   │   │       └── streamable-http
│   │   │           ├── _category_.yaml
│   │   │           ├── jupyter-extension
│   │   │           │   └── index.mdx
│   │   │           └── standalone
│   │   │               └── index.mdx
│   │   ├── index.mdx
│   │   ├── releases
│   │   │   ├── _category_.yaml
│   │   │   └── index.mdx
│   │   ├── resources
│   │   │   ├── _category_.yaml
│   │   │   └── index.mdx
│   │   └── tools
│   │       ├── _category_.yaml
│   │       └── index.mdx
│   ├── docusaurus.config.js
│   ├── LICENSE
│   ├── Makefile
│   ├── package.json
│   ├── README.md
│   ├── sidebars.js
│   ├── src
│   │   ├── components
│   │   │   ├── HomepageFeatures.js
│   │   │   ├── HomepageFeatures.module.css
│   │   │   ├── HomepageProducts.js
│   │   │   └── HomepageProducts.module.css
│   │   ├── css
│   │   │   └── custom.css
│   │   ├── pages
│   │   │   ├── index.module.css
│   │   │   ├── markdown-page.md
│   │   │   └── testimonials.tsx
│   │   └── theme
│   │       └── CustomDocItem.tsx
│   └── static
│       └── img
│           ├── datalayer
│           │   ├── logo.png
│           │   └── logo.svg
│           ├── favicon.ico
│           ├── feature_1.svg
│           ├── feature_2.svg
│           ├── feature_3.svg
│           ├── product_1.svg
│           ├── product_2.svg
│           └── product_3.svg
├── examples
│   └── integration_example.py
├── jupyter_mcp_server
│   ├── __init__.py
│   ├── __main__.py
│   ├── __version__.py
│   ├── config.py
│   ├── enroll.py
│   ├── env.py
│   ├── jupyter_extension
│   │   ├── __init__.py
│   │   ├── backends
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── local_backend.py
│   │   │   └── remote_backend.py
│   │   ├── context.py
│   │   ├── extension.py
│   │   ├── handlers.py
│   │   └── protocol
│   │       ├── __init__.py
│   │       └── messages.py
│   ├── models.py
│   ├── notebook_manager.py
│   ├── server_modes.py
│   ├── server.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── _base.py
│   │   ├── _registry.py
│   │   ├── assign_kernel_to_notebook_tool.py
│   │   ├── delete_cell_tool.py
│   │   ├── execute_cell_tool.py
│   │   ├── execute_ipython_tool.py
│   │   ├── insert_cell_tool.py
│   │   ├── insert_execute_code_cell_tool.py
│   │   ├── list_cells_tool.py
│   │   ├── list_files_tool.py
│   │   ├── list_kernels_tool.py
│   │   ├── list_notebooks_tool.py
│   │   ├── overwrite_cell_source_tool.py
│   │   ├── read_cell_tool.py
│   │   ├── read_cells_tool.py
│   │   ├── restart_notebook_tool.py
│   │   ├── unuse_notebook_tool.py
│   │   └── use_notebook_tool.py
│   └── utils.py
├── jupyter-config
│   ├── jupyter_notebook_config
│   │   └── jupyter_mcp_server.json
│   └── jupyter_server_config.d
│       └── jupyter_mcp_server.json
├── LICENSE
├── Makefile
├── pyproject.toml
├── pytest.ini
├── README.md
├── RELEASE.md
├── smithery.yaml
└── tests
    ├── __init__.py
    ├── conftest.py
    ├── test_common.py
    ├── test_config.py
    ├── test_jupyter_extension.py
    ├── test_list_kernels.py
    ├── test_tools.py
    └── test_use_notebook.py
```

# Files

--------------------------------------------------------------------------------
/docs/static/img/feature_3.svg:
--------------------------------------------------------------------------------

```
  1 | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
  2 | <!--
  3 |   ~ Copyright (c) 2023-2024 Datalayer, Inc.
  4 |   ~
  5 |   ~ BSD 3-Clause License
  6 | -->
  7 | 
  8 | <svg
  9 |    xmlns:dc="http://purl.org/dc/elements/1.1/"
 10 |    xmlns:cc="http://creativecommons.org/ns#"
 11 |    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 12 |    xmlns:svg="http://www.w3.org/2000/svg"
 13 |    xmlns="http://www.w3.org/2000/svg"
 14 |    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
 15 |    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
 16 |    viewBox="0 0 302.65826 398.12268"
 17 |    version="1.1"
 18 |    id="svg1054"
 19 |    sodipodi:docname="6.svg"
 20 |    inkscape:version="1.0.1 (c497b03c, 2020-09-10)"
 21 |    width="302.65826"
 22 |    height="398.12268">
 23 |   <metadata
 24 |      id="metadata1058">
 25 |     <rdf:RDF>
 26 |       <cc:Work
 27 |          rdf:about="">
 28 |         <dc:format>image/svg+xml</dc:format>
 29 |         <dc:type
 30 |            rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
 31 |         <dc:title>Web_SVG</dc:title>
 32 |       </cc:Work>
 33 |     </rdf:RDF>
 34 |   </metadata>
 35 |   <sodipodi:namedview
 36 |      pagecolor="#ffffff"
 37 |      bordercolor="#666666"
 38 |      borderopacity="1"
 39 |      objecttolerance="10"
 40 |      gridtolerance="10"
 41 |      guidetolerance="10"
 42 |      inkscape:pageopacity="0"
 43 |      inkscape:pageshadow="2"
 44 |      inkscape:window-width="1256"
 45 |      inkscape:window-height="607"
 46 |      id="namedview1056"
 47 |      showgrid="false"
 48 |      inkscape:zoom="0.83484846"
 49 |      inkscape:cx="105.00162"
 50 |      inkscape:cy="149.95736"
 51 |      inkscape:window-x="0"
 52 |      inkscape:window-y="25"
 53 |      inkscape:window-maximized="0"
 54 |      inkscape:current-layer="svg1054"
 55 |      inkscape:document-rotation="0"
 56 |      fit-margin-top="0"
 57 |      fit-margin-left="0"
 58 |      fit-margin-right="0"
 59 |      fit-margin-bottom="0" />
 60 |   <defs
 61 |      id="defs843">
 62 |     <style
 63 |        id="style833">.cls-1{fill:none;}.cls-2,.cls-9{fill:#d6d8e5;}.cls-2{opacity:0.4;isolation:isolate;}.cls-3{fill:#d5d6e0;}.cls-4{fill:#e9eaf4;}.cls-5{fill:url(#Безымянный_градиент_15);}.cls-6{clip-path:url(#clip-path);}.cls-7{fill:#8c50ff;}.cls-8{opacity:0.05;}.cls-9{opacity:0.4;}.cls-10{fill:#c5c7d3;}.cls-11{fill:#38226d;}.cls-12{fill:#9c73ff;}.cls-13{fill:#ffcea9;}.cls-14{fill:#ededed;}.cls-15{fill:#e5e5e5;}.cls-16{fill:#f4f4f4;}.cls-17{fill:#bfbfbf;}.cls-18{fill:#3a2c6d;}.cls-19{fill:#dceeff;}.cls-20{fill:#dbdbdb;}.cls-21{fill:#1e3779;}.cls-22{fill:#031f60;}</style>
 64 |     <linearGradient
 65 |        id="Безымянный_градиент_15"
 66 |        x1="235.62"
 67 |        y1="356.16"
 68 |        x2="235.62"
 69 |        y2="256.92999"
 70 |        gradientUnits="userSpaceOnUse">
 71 |       <stop
 72 |          offset="0"
 73 |          stop-color="#e9eaf4"
 74 |          id="stop835" />
 75 |       <stop
 76 |          offset="0.63"
 77 |          stop-color="#e9eaf4"
 78 |          stop-opacity="0"
 79 |          id="stop837" />
 80 |     </linearGradient>
 81 |     <clipPath
 82 |        id="clip-path">
 83 |       <circle
 84 |          class="cls-1"
 85 |          cx="233.41"
 86 |          cy="151.32001"
 87 |          r="151.32001"
 88 |          id="circle840" />
 89 |     </clipPath>
 90 |   </defs>
 91 |   <title
 92 |      id="title845">Web_SVG</title>
 93 |   <path
 94 |      class="cls-2"
 95 |      d="m 146.09827,397.27001 -47.08,-27.18 c -2.19,-1.27 -1.92,-3.48 0.62,-4.94 l 45.84,-26.47 c 2.54,-1.46 6.37,-1.62 8.56,-0.36 l 47.07,27.18 c 2.2,1.27 1.92,3.48 -0.61,4.94 l -45.85,26.47 c -2.53,1.47 -6.36,1.62 -8.55,0.36 z"
 96 |      id="path847" />
 97 |   <path
 98 |      class="cls-3"
 99 |      d="m 205.92827,362.37001 -2.32,0.71 -46.23,-26.7 c -2.19,-1.26 -6,-1.1 -8.55,0.36 l -45.85,26.47 -0.11,0.07 -2,-0.61 v 3 c 0,0.11 0,0.22 0,0.34 v 0.24 0 a 2.68,2.68 0 0 0 1.44,1.86 l 47.08,27.18 c 2.19,1.26 6,1.11 8.56,-0.36 l 45.98,-26.43 a 3.62,3.62 0 0 0 2.08,-2.64 v 0 z"
100 |      id="path851" />
101 |   <path
102 |      class="cls-4"
103 |      d="m 149.47827,392.14001 -47.08,-27.14 c -2.19,-1.26 -1.91,-3.47 0.62,-4.93 l 45.85,-26.47 c 2.53,-1.47 6.36,-1.63 8.55,-0.36 l 47.08,27.18 c 2.19,1.27 1.92,3.48 -0.62,4.94 l -45.84,26.47 c -2.54,1.42 -6.37,1.58 -8.56,0.31 z"
104 |      id="path853" />
105 |   <path
106 |      class="cls-3"
107 |      d="m 173.30827,360.25001 v -4 l -5.7,1.5 -12.68,-7.35 a 3.58,3.58 0 0 0 -3.24,0.14 l -12.33,7.12 -5.83,-1.54 v 4.31 0 1 a 0.48,0.48 0 0 0 0,0.12 v 0.1 0 a 1,1 0 0 0 0.55,0.7 l 17.85,10.39 a 3.61,3.61 0 0 0 3.24,-0.13 l 17.38,-10 a 1.36,1.36 0 0 0 0.78,-1 v 0 -1.32 z"
108 |      id="path855" />
109 |   <path
110 |      class="cls-4"
111 |      d="m 151.92827,367.33001 -17.87,-10.33 c -0.83,-0.48 -0.73,-1.32 0.23,-1.87 l 17.38,-10 a 3.58,3.58 0 0 1 3.26,-0.13 l 17.84,10.3 c 0.83,0.48 0.73,1.32 -0.23,1.88 l -17.38,10 a 3.61,3.61 0 0 1 -3.23,0.15 z"
112 |      id="path857" />
113 |   <polygon
114 |      class="cls-5"
115 |      points="129.08,260.92 342.17,256.93 235.48,356.16 "
116 |      id="polygon859"
117 |      style="fill:url(#%D0%91%D0%B5%D0%B7%D1%8B%D0%BC%D1%8F%D0%BD%D0%BD%D1%8B%D0%B9_%D0%B3%D1%80%D0%B0%D0%B4%D0%B8%D0%B5%D0%BD%D1%82_15)"
118 |      transform="translate(-82.071732)" />
119 |   <circle
120 |      class="cls-4"
121 |      cx="151.33827"
122 |      cy="151.32001"
123 |      r="151.32001"
124 |      id="circle861" />
125 |   <g
126 |      class="cls-6"
127 |      clip-path="url(#clip-path)"
128 |      id="g865"
129 |      transform="translate(-82.071732)">
130 |     <path
131 |        class="cls-7"
132 |        d="m 193.61,61.31 0.51,-1.79 v -3.88 a 9.1,9.1 0 0 1 -2.17,0 c -0.38,-0.17 -0.12,-1.23 -0.12,-1.23 l 1.78,-1.53 -3.57,0.76 -1.28,2 -4,2.47 1.79,1 1.15,0.76 1.72,0.26 V 59 c 0,0 2,-0.51 2.49,0.25 l -0.25,1.41 z m -44.22,55.25 a 4,4 0 0 0 -3.28,0 2.7,2.7 0 0 1 -2.56,2.73 l 2.92,1.63 2.92,-1.09 3.82,-1.27 3.27,2.36 c 2.46,-3 0,-4.73 0,-4.73 z m 62.1,55.63 -2.83,-0.6 -4.92,-3.13 h -4.32 l -3.73,-1.93 h -1.49 c 0,0 0.3,-2.09 -0.15,-2.24 -0.45,-0.15 -3.72,-2.09 -3.72,-2.09 l -2.54,0.9 v -1.79 L 186,160.56 h -1.49 l 0.15,-1.49 c 0,0 1.34,-2.65 0.6,-3.11 -0.74,-0.46 -3.43,-6.43 -3.43,-6.43 l -4,-2.24 h -4.32 l -3.73,-3.57 -3.72,-3 -2.09,-4.17 -2.24,-1.49 c 0,0 -2.53,1.64 -3.13,1.49 -0.6,-0.15 -4.32,-1.49 -4.32,-1.49 l -3.13,-0.89 -3.91,1.83 v 4 l -1.49,-1 v -1.5 l 1.34,-2.53 0.6,-1.79 -1.34,-0.59 -1.79,2.08 -4.47,1.33 -0.9,1.94 -2.09,1.94 -0.59,1.64 -3.28,-2.68 a 25.86,25.86 0 0 1 -3.43,1.49 22.67,22.67 0 0 1 -3.43,-2.24 l -1.64,-3.13 0.9,-4.17 0.74,-4.33 -2.83,-1.79 h -3.88 l -3,-0.89 1.4,-2.24 1.64,-3.28 1.49,-2.38 c 0,0 0.42,-1.09 0.9,-1.34 1.9,-1 0,-1.94 0,-1.94 0,0 -4,-0.3 -4.62,0 -0.62,0.3 -2.24,1.19 -2.24,1.19 v 3.43 l -3,1.34 -4.62,1 -3.13,-3.14 c -0.45,-0.44 -1.49,-5.36 -1.49,-5.36 l 1.93,-1.79 0.9,-1.49 0.15,-3.58 1.93,-1.64 0.75,-1.94 2.22,-1.62 2.13,-1.95 1.94,-0.59 2.68,0.59 c 0.15,-1 5.07,-0.59 5.07,-0.59 l 1.94,-0.1 -0.15,-1.09 c 0.9,-0.75 3.88,0 3.88,0 l 4,-0.34 c 0.88,0.19 1.62,2 1.62,2 l -1.79,1.64 1.2,3.57 1.19,1.64 -0.45,1.64 2.87,-0.42 0.14,-2.84 0.15,-3.43 -0.44,-3.76 2.68,-5.78 10.88,-7 -0.15,-2.54 0.74,-3.65 1.94,0.67 3.43,-4.07 4.47,-1.48 2.68,-1.82 1.34,-2.69 4.92,-1.49 4.92,-2.53 -2.26,2.41 -0.3,2.09 2.68,-0.6 2.39,-1.34 2.08,-0.59 1.94,-0.6 0.6,-1.34 h -1.34 l -2.09,-0.75 c -1.34,-1 -3,-2.23 -3,-2.23 a 1.66,1.66 0 0 1 0.44,-1.35 l 1.35,-1.21 c 0,0 0.59,-0.72 0.15,-0.72 -0.44,0 -3.73,-0.72 -3.73,-0.72 l -3.58,1.44 H 168 l 2.54,-2.21 h 2.83 l 3.13,-0.75 h 6.5 l 3.58,-0.75 2.55,-1.37 3.13,-0.32 0.9,-3.35 -2.09,-1.79 -3,-1.55 -0.6,-3.52 
-1.64,-3.58 h -1.49 l -0.15,1.94 h -2.38 l -0.6,1.19 -0.89,0.6 -1.49,0.3 -1.35,-1.35 1.79,-1.93 -2.08,-1.94 -2.54,-1.34 H 172 l -1.94,0.59 -2.68,3.13 -2.09,1.79 0.6,1.64 -0.76,2.49 0.45,1 -2.54,0.9 -2.83,0.59 -1.19,-0.3 -0.9,1.2 1.64,3.35 -1.64,1.12 h -2.23 l -1.89,-2.43 -0.45,-1.38 1.34,-0.74 -1,-1.79 -3.13,-0.15 -4.17,-2.59 h -1.94 l -0.45,-1.59 -1.34,-0.59 1.94,-2.24 2.83,-1.64 4.92,-1.79 1.79,-0.59 3.87,-0.9 2.6,-1.44 2.38,-2.09 h 3.13 l 3.73,-0.94 h 2.39 l 1,-2.78 h -4.77 l -1.79,1.37 h -3.13 a 5.39,5.39 0 0 0 -2.39,-1.37 v -1.1 L 163,24.6 165.13,23 a 12.52,12.52 0 0 0 1.79,-3.13 l 4.32,1.49 3.27,-0.17 2.25,-0.13 2.83,0.15 1.64,0.6 C 180.78,23 179,22.11 179,22.11 l -5.37,0.59 -2.39,1.3 0.75,-1.64 -2.54,0.15 -3,1.93 2.09,1.05 h 4.92 l 2.68,1 2.24,-0.89 2.38,0.89 2.84,0.9 -3.43,1.07 -3,1.41 -3,0.65 -2.38,0.89 1,2.09 6.26,-0.15 3.95,2.43 h 3.28 l 0.15,-1.34 0.74,-1.34 1.5,-0.3 -1.2,-1.64 -0.44,-1.24 h 1.78 l 1.5,1.09 c 1.19,0.9 3.72,-1.09 3.72,-1.09 l 3.73,-1.41 -4.62,-1.37 -1.49,-1.49 c 0.44,-0.9 -4.92,-2.24 -4.92,-2.24 l -4.18,-2.53 -1,-1.05 H 178 l 2.83,-1.19 7,0.74 3.58,-0.74 8.2,-2.09 5.51,-1.34 4,-1.49 -4.63,-0.72 c 0,0 -3.28,0.9 -3.73,0.9 -0.45,0 -4.32,0.3 -4.77,0.3 h -10.44 l -3.27,0.74 3,0.75 -1.79,1 h -3.13 l -1.79,0.6 2.38,-1.79 -2.53,-0.6 -3.13,0.75 -3.88,2.23 1.79,0.6 2.54,-1 2.08,0.44 -2.23,0.6 -0.9,1.19 1.49,0.45 -4,0.3 -7,-1.19 -2.83,3.42 -5.67,4 1.49,1.19 -1,0.63 -1.34,0.71 -3.88,-0.71 -5.36,1.41 -9.84,2.43 -32.89,31.65 -13.12,17.1 -7.6,45.62 13.41,-4.47 2.09,2.23 3.58,0.6 2.39,1.49 2.53,-1 2.09,2.09 4.62,2.53 4.47,0.6 1.34,4.47 c 0,0 3.13,2.83 3,3.43 -0.13,0.6 1,3.28 1.64,3.28 a 30.41,30.41 0 0 1 4.33,1.94 l 2.53,0.29 1,-2.23 h 2.24 v 2.23 l 0.9,1.2 0.29,3.42 0.9,2.54 -1.49,1.64 -2.39,3 -3,4.15 0.3,4 -0.75,2.68 2.24,0.75 -1.94,2.68 a 32.55,32.55 0 0 1 1,3.43 30.41,30.41 0 0 0 1.64,3.73 l 2.09,1.79 0.9,3 5.07,10.43 3.57,3 5.22,2.68 c 0,0 3.28,3.58 3.43,4 0.15,0.42 1.79,7 1.79,7 l -0.07,3.68 1,3.87 0.3,4.33 0.45,6 1.49,4.77 1.34,6.26 -1.64,2.09 
1,3.09 1.2,4.51 2.08,2.24 -0.14,3.58 0.44,3.57 9.25,9.84 1.78,1.94 v -4.62 l 3,-1.73 V 260 l -3,-1.34 v -2.09 l 1.47,-1.34 1.48,-2.39 v -1.94 c 1.23,-0.44 0,-1.79 0,-1.79 h -2.05 l -0.15,-1.93 h 1.94 l 0.8,1.19 1.58,-1.19 -1.19,-2.39 c 0.3,-0.74 2.39,0 2.39,0 l 3.57,-2.09 1.49,-1.78 v -2.39 l -1.7,-1.53 -1.5,-2.39 h 1.79 c 0,0 2.69,1.35 3.58,0 0.89,-1.35 2.24,-3.13 2.24,-3.13 l 1.64,-3.87 c 0,0 3.13,-5.67 3.43,-6.12 0.3,-0.45 0,-6 0,-6 l 4.92,-4 h 3.28 l 2.68,-4.48 2.09,-2.94 v -5 l 0.74,-4.77 -1.17,-4.66 4.17,-5.52 3.13,-3 -1.19,-5.51 z m 33.22,-117.14 1,0.59 1.36,-1.18 H 249 l 1.78,-0.6 0.8,-1.1 -0.38,-1.52 0.76,-0.81 c 0.09,-1.23 -1.86,-2.12 -1.86,-2.12 H 249 l -1.36,1.57 h -1.44 l -0.51,1.78 0.21,1.34 -1.19,0.74 z m 9,-11.27 -1.52,0.68 v 2.29 l 0.17,0.59 a 6.36,6.36 0 0 1 0.17,1.44 l 1.1,0.76 1.1,0.68 0.51,1 c 0,0 -0.17,1 -0.76,1 l -1.36,0.43 0.51,0.93 c 0,0 -1,0.51 -1.1,0.93 l 0.93,0.68 -0.85,0.85 h -1.86 v 1 h 3.9 l 1.61,-1 2.37,0.42 c 0,0 1.95,-0.42 2.37,-0.42 l -0.34,-0.43 0.6,-0.76 0.93,-1.1 -1.19,-0.48 -1.35,-0.42 0.17,-1.44 -1.28,-1 -1.94,-1.06 -0.17,-1.95 0.17,-1.69 h -2 l -0.42,-0.85 1.18,-1.1 z m 11.1,7.29 v -0.64 h 1.36 l 0.76,-0.55 v -2.12 l 1,-0.51 -0.34,-1.69 L 266,45 263.63,45.76 V 47 l -1.44,0.17 -0.34,1.14 1,0.47 -1.61,0.93 -0.43,0.72 1.36,1.23 z m 85.4,51.37 5.6,3 2.89,-1.12 1.92,2.72 5.13,1.13 2.24,1.12 6.25,-16.5 L 372.45,84 344.45,38.18 336.09,32 H 326 l -4.49,0.64 -1.12,2.25 L 318,34 316.87,32 h -2.25 l 1,1 0.81,1.61 h -3.53 v 1.99 l -2.52,-0.37 -1.53,-0.23 -0.86,0.43 0.8,0.49 1.11,0.25 -1,0.37 -1.34,-0.54 -1.67,-0.56 -0.5,-1.11 -1.23,-1 -1.42,-0.19 -1,-1.17 2.18,0.19 1.92,0.86 1.6,0.19 c 0,0 1,-0.25 1.11,-0.07 a 12.12,12.12 0 0 0 2.35,0.5 l 1.3,-1 L 310,32 306.1,30.85 c 0,0 -6.55,-0.9 -6.86,-0.88 a 28.82,28.82 0 0 1 -2.78,-0.85 l -2.28,-0.56 h -5 l -7.6,1.41 -4.2,2.67 -5,4.51 c 0,0 -4.32,0.62 -4.57,0.68 a 20.44,20.44 0 0 0 -2,1.91 l 1,2.17 -0.25,1 0.68,0.55 0.5,1 0.68,1 2.53,-0.37 2.72,-1.54 2.1,1.11 1.61,2.78 0.3,1.67 1.92,0.37 1.3,-1.11 
1.36,-0.31 0.74,-2.23 0.12,-1.73 1.86,-0.55 v -1.14 l -0.68,-0.49 -0.87,-0.56 -0.31,-2 2.83,-1.79 c 0,0 1.5,-1.36 1.5,-1.6 0,-0.24 0.74,-1.06 0.74,-1.06 0,0 1.18,0.07 1.55,0.13 0.37,0.06 2.34,0.74 2.34,0.74 l 0.75,0.56 -2.29,1.11 -1.35,1.36 -1,0.68 0.93,2.16 2.41,1.24 2.16,-0.44 3.43,-0.17 c 0,0 3.93,0.79 4.11,0.92 0.18,0.13 -1.13,0.55 -1.13,0.55 l -1.53,0.43 -0.86,0.56 -1.46,-0.74 a 3.78,3.78 0 0 0 -2,-0.68 4.09,4.09 0 0 0 -1.67,0.74 l 0.93,1.42 1.29,0.87 c 0,0 -0.55,1.6 -0.92,1.54 -0.37,-0.06 -1,-1.17 -1,-1.17 l -1.43,-0.56 -0.92,0.56 0.12,1.36 -0.4,1.92 -1.54,1 -1.12,0.87 -1.48,-0.87 -1.91,0.56 -3.34,-0.19 -0.74,0.68 -1.18,-0.49 c 0,0 -2.9,0.12 -3.09,0.18 a 10.62,10.62 0 0 1 -1.91,-0.86 l -0.74,-1.67 0.92,-1.11 -0.43,-0.5 -0.37,-0.92 0.62,-0.75 -0.93,-0.24 -1.54,1.54 -0.07,1.49 c 0,0 0.81,1.42 0.87,1.6 a 10.91,10.91 0 0 1 0.06,1.3 l -1.54,0.62 -1.67,0.49 0.31,0.62 1.73,0.31 -0.56,1 -1,0.06 -0.49,1.24 -0.31,0.68 -1.36,-0.37 0.06,-0.87 0.06,-1.11 -0.55,-0.56 -0.81,0.87 -0.86,1.11 -2,0.37 -0.93,0.74 -0.62,1.36 -1.48,0.68 -1.17,0.74 -1.8,-0.92 -0.24,0.43 0.37,0.92 -0.43,0.5 -2.35,-0.19 -1.3,-0.18 -0.19,1.73 2.29,0.37 1.11,1 c 0,0 0.87,1.05 1.05,1.05 0.18,0 1.43,0.31 1.43,0.31 l -0.5,2.72 -1.11,2.53 -4.64,-0.06 -5.06,-0.12 -1.61,0.61 -0.5,1.42 0.44,1.49 -0.9,2.79 -0.37,1.49 A 7,7 0 0 0 245,79 c 0.13,0.18 0.87,1.48 0.87,1.48 l -0.56,2.1 1.67,0.5 2.78,0.18 1.48,0.81 1.12,0.18 1.85,-0.8 2.78,-0.37 2.6,-2.17 -0.43,-2.65 1.6,-2 3.4,-2.16 1.48,-0.5 -0.18,-2.22 2,-1.18 2,0.68 1.36,-0.74 4.18,-1.28 4.14,3.84 5.68,3.89 c 0,0 1.18,2.35 1.24,2.53 a 7.8,7.8 0 0 1 -0.87,1.61 H 282 L 280.8,81 c 0,0 0.37,0.92 0.61,0.92 0.24,0 2.79,1.18 2.79,1.18 l 0.8,0.62 0.43,-0.13 -0.06,-1.17 0.25,-1.11 0.86,0.12 a 7.77,7.77 0 0 0 0.93,-1.24 17.93,17.93 0 0 1 0.86,-1.6 l -0.68,-1.3 0.38,-0.87 1,0.13 0.12,1.29 h 0.37 l -0.12,-1.6 -1,-0.62 -1.34,-0.89 -0.93,-1.35 -1.61,-0.44 -1.85,-1.3 -0.43,-1.48 -1.73,-0.86 -0.45,-1.36 0.5,-1.42 1.11,-0.19 0.06,1.36 1.11,0.37 0.75,-0.43 1.11,2 2.28,1.29 1.8,1 
1.11,0.13 1.85,2 0.44,2.59 3.46,3.59 0.49,2.53 2.47,1.73 0.68,-0.56 -0.37,-1.6 1.36,-0.74 -0.43,-1.12 -1.37,-0.47 -0.56,-1.11 0.25,-1.8 -0.8,-0.92 0.74,0.12 0.55,0.68 0.87,-1.17 1.48,0.06 1.49,0.62 1.11,1.52 1.11,1.73 c 0,0 -0.74,0.43 -0.8,0.62 a 5.76,5.76 0 0 0 0.62,1.36 l 1.42,1.29 1.11,1.56 2.22,-0.06 0.25,1 1.61,0.18 1.54,-0.37 0.44,-1 0.68,0.93 1.79,0.62 1.54,-0.12 1.3,-1.43 1.17,0.5 0.87,0.55 -0.31,1.24 0.77,2.54 -1.12,2.57 v 3.36 H 313 l -2.56,1 -4.15,-2.07 -2.72,0.32 -4.65,-1.68 h -3.68 l -1.12,1 0.64,1.76 -1.44,1.44 -3.69,-2.24 -2.88,0.32 -1.12,-1.92 -4.33,-1.28 -3,-1.45 -2.3,-1.13 2.72,-3.2 -0.8,-2.72 -1.92,-1.13 -11.38,1.29 -4.48,1.76 -2.89,0.16 -3,1.12 -3.68,-0.8 -2.41,2.88 -2.88,1.92 -2.6,5.45 0.68,2.4 -3.36,3.21 -3.36,1.28 -1.6,3 c 0,0 -3.53,7.85 -3.85,8.49 -0.32,0.64 0,4.65 0,4.65 l 1.44,4.64 -3.36,7.85 1.28,3.85 1.76,3.52 4.65,6.57 -0.14,3.3 9.58,7.29 6.42,-2.58 6.18,1.96 7.68,-4.16 c 0,0 2.57,0 3.37,1.44 0.8,1.44 1.28,2.72 4.16,2.72 2.88,0 4.33,3.21 4.33,3.21 l -1.28,3.68 0.64,1.92 -1.28,3 3,6.24 2.24,4.81 2.09,4.65 0.48,5.92 -0.48,3.37 -3.69,6.41 1.12,9.93 1.28,2.24 -0.48,1.6 2.25,2.41 1.44,5.6 -1.28,5.29 1.92,4 c 0,0 1.76,4.33 1.92,4.81 0.16,0.48 1.28,2.08 1.12,2.88 -0.16,0.8 -0.64,2.89 -0.64,2.89 l 0.64,3.2 h 8.17 l 3.85,-1.28 9.45,-9.61 a 33.38,33.38 0 0 0 2.4,-3.21 c 0.16,-0.48 1.44,-6.4 1.44,-6.4 l 6.73,-2.73 v -5.23 l -1.44,-4.17 3.52,-3.36 9.46,-5.45 0.8,-4.17 v -7.2 l -1.77,-5.45 0.48,-3.36 -1.64,-1.45 3.05,-4.48 1.12,-5.93 4.48,-2.72 1,-2.73 8.17,-8.47 2.57,-8.06 2.4,-4.45 v -4.81 l -2.24,-0.8 -2.57,2.08 -3.52,0.32 -3.68,1 -3.05,-1.93 0.32,-2.4 -3.84,-4.32 -3.85,-2.25 -1.76,-5.76 -3.76,-3.58 -0.32,-4.48 -2.73,-4 -3.36,-7.52 -3.53,-3.52 -0.32,-2.89 1.45,0.65 1.6,1.28 0.64,2.4 c 1.28,0 1.76,-2.4 1.76,-2.4 l 1.12,2.4 1.12,3 3.53,5.29 1.92,0.32 2.24,4.65 -0.16,2.08 6.41,6.57 1.12,6.08 1.12,2.73 2.09,2.08 9.61,-2.56 1.76,-1.61 4.16,-1.28 v -1.92 l 4.81,-3.2 2,-3.72 1.16,-1.89 1.61,-2.24 -2.65,-3.65 -3.7,-3.13 -1.18,-3 -1.36,0.32 -1,2.88 
-3.68,1 -1.92,-1.28 -0.64,-2.72 -1.28,0.32 -1.93,-3.53 -1.76,-1 -2.24,-3.68 1.6,-1.77 z m -75,-23.58 0.34,-1.53 v -2.2 a 2.47,2.47 0 0 0 -1.23,-0.59 1.34,1.34 0 0 0 -0.89,0.34 v 2.45 a 4.55,4.55 0 0 1 0,1.53 z m 0,-6.61 a 1,1 0 0 0 -1.19,-1.1 l -0.59,1.69 0.59,1.27 h 1.19 z m -43.7,-43.74 h 6 l 3.27,-4.71 h 7.53 l 1.21,-3 4.63,-2.45 c 2.19,-1.91 0,-5.37 0,-5.37 l -9.44,-1.18 -9.38,1.2 -13.64,-2.54 -8.41,3.82 c 0,0 -6.05,3.27 -6.87,3 a 29.17,29.17 0 0 0 -4.91,0 c 0,0 3,2.72 4.36,3 a 9.87,9.87 0 0 0 3,0 c 0,0 0,2.66 0.55,3.27 1.91,2.19 -2,3.82 -2,3.82 v 2.58 c 0,0 0.43,4.87 1.24,4.65 0.81,-0.22 2.14,2.59 2.14,2.59 l 2.51,2.73 h 4.59 l 5.73,-6.82 5.64,-2.45 z m 117.85,163.14 -0.74,-2 -0.85,0.84 -0.74,1.7 -1.69,1 -0.74,2.23 -1.06,2.64 -4.13,1.27 -1.8,3.07 -0.42,5.08 -1.48,2.52 -1.69,1.8 0.63,3 1.17,1.38 -1.17,1.58 1.17,1 h 3.91 l 2.22,-3.28 c 0,0 1.8,-3.81 1.8,-4.13 0,-0.32 0.85,-3.81 0.85,-3.81 l 3.06,-4 V 198 h 2.12 l -0.63,-3.92 z m -213.61,-81.15 -4.64,-1.58 c 0,0 -1.29,0.18 -2.8,0.54 a 16.83,16.83 0 0 0 -3.24,1 l 6.58,2.24 h 2.66 l 4.44,3.82 h 3 c 0,0 2.45,-1.09 1.63,-3 l -3.81,-1.37 z"
133 |        id="path863" />
134 |   </g>
135 |   <path
136 |      class="cls-8"
137 |      d="M 166.04827,269.00001 A 151.41,151.41 0 0 1 21.048268,74.340006 151.34,151.34 0 1 0 296.34827,194.65001 a 151.23,151.23 0 0 1 -130.3,74.35 z"
138 |      id="path867" />
139 |   <ellipse
140 |      class="cls-3"
141 |      cx="153.54826"
142 |      cy="356.16"
143 |      rx="4.0900002"
144 |      ry="2.3599999"
145 |      id="ellipse1050" />
146 | </svg>
147 | 
```

--------------------------------------------------------------------------------
/jupyter_mcp_server/tools/execute_cell_tool.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright (c) 2023-2024 Datalayer, Inc.
  2 | #
  3 | # BSD 3-Clause License
  4 | 
  5 | """Unified execute cell tool with configurable streaming."""
  6 | 
  7 | import asyncio
  8 | import logging
  9 | import time
 10 | from pathlib import Path
 11 | from typing import Union, List
 12 | from mcp.types import ImageContent
 13 | 
 14 | from jupyter_mcp_server.tools._base import BaseTool, ServerMode
 15 | from jupyter_mcp_server.config import get_config
 16 | from jupyter_mcp_server.utils import get_current_notebook_context, execute_via_execution_stack, safe_extract_outputs
 17 | 
 18 | logger = logging.getLogger(__name__)
 19 | 
 20 | 
 21 | class ExecuteCellTool(BaseTool):
 22 |     """Execute a cell with configurable timeout and optional streaming progress updates.
 23 | 
 24 |     Supports both MCP_SERVER (with WebSocket) and JUPYTER_SERVER modes.
 25 |     The stream parameter controls execution behavior:
 26 |     - stream=False: Use forced sync approach (more reliable for short-running cells)
 27 |     - stream=True: Use real-time monitoring approach (better for long-running cells)
 28 |     """
 29 | 
 30 |     @property
 31 |     def name(self) -> str:
 32 |         return "execute_cell"
 33 | 
 34 |     @property
 35 |     def description(self) -> str:
 36 |         return "Execute a cell with configurable timeout and optional streaming progress updates"
 37 | 
 38 |     async def _get_jupyter_ydoc(self, serverapp, file_id: str):
 39 |         """Get the YNotebook document if it's currently open in a collaborative session."""
 40 |         try:
 41 |             yroom_manager = serverapp.web_app.settings.get("yroom_manager")
 42 |             if yroom_manager is None:
 43 |                 return None
 44 | 
 45 |             room_id = f"json:notebook:{file_id}"
 46 | 
 47 |             if yroom_manager.has_room(room_id):
 48 |                 yroom = yroom_manager.get_room(room_id)
 49 |                 notebook = await yroom.get_jupyter_ydoc()
 50 |                 return notebook
 51 |         except Exception:
 52 |             pass
 53 | 
 54 |         return None
 55 | 
 56 |     async def _write_outputs_to_cell(
 57 |         self,
 58 |         notebook_path: str,
 59 |         cell_index: int,
 60 |         outputs: List[Union[str, ImageContent]]
 61 |     ):
 62 |         """Write execution outputs back to a notebook cell."""
 63 |         import nbformat
 64 |         from jupyter_mcp_server.utils import _clean_notebook_outputs
 65 | 
 66 |         with open(notebook_path, 'r', encoding='utf-8') as f:
 67 |             notebook = nbformat.read(f, as_version=4)
 68 | 
 69 |         _clean_notebook_outputs(notebook)
 70 | 
 71 |         if cell_index < 0 or cell_index >= len(notebook.cells):
 72 |             logger.warning(f"Cell index {cell_index} out of range, cannot write outputs")
 73 |             return
 74 | 
 75 |         cell = notebook.cells[cell_index]
 76 |         if cell.cell_type != 'code':
 77 |             logger.warning(f"Cell {cell_index} is not a code cell, cannot write outputs")
 78 |             return
 79 | 
 80 |         # Convert formatted outputs to nbformat structure
 81 |         cell.outputs = []
 82 |         for output in outputs:
 83 |             if isinstance(output, ImageContent):
 84 |                 cell.outputs.append(nbformat.v4.new_output(
 85 |                     output_type='display_data',
 86 |                     data={output.mimeType: output.data},
 87 |                     metadata={}
 88 |                 ))
 89 |             elif isinstance(output, str):
 90 |                 if output.startswith('[ERROR:') or output.startswith('[TIMEOUT ERROR:') or output.startswith('[PROGRESS:'):
 91 |                     cell.outputs.append(nbformat.v4.new_output(
 92 |                         output_type='stream',
 93 |                         name='stdout',
 94 |                         text=output
 95 |                     ))
 96 |                 else:
 97 |                     cell.outputs.append(nbformat.v4.new_output(
 98 |                         output_type='execute_result',
 99 |                         data={'text/plain': output},
100 |                         metadata={},
101 |                         execution_count=None
102 |                     ))
103 | 
104 |         # Update execution count
105 |         max_count = 0
106 |         for c in notebook.cells:
107 |             if c.cell_type == 'code' and c.execution_count:
108 |                 max_count = max(max_count, c.execution_count)
109 |         cell.execution_count = max_count + 1
110 | 
111 |         with open(notebook_path, 'w', encoding='utf-8') as f:
112 |             nbformat.write(notebook, f)
113 | 
114 |         logger.info(f"Wrote {len(outputs)} outputs to cell {cell_index} in {notebook_path}")
115 | 
116 |     async def execute(
117 |         self,
118 |         mode: ServerMode,
119 |         server_client=None,
120 |         contents_manager=None,
121 |         kernel_manager=None,
122 |         kernel_spec_manager=None,
123 |         notebook_manager=None,
124 |         serverapp=None,
125 |         # Tool-specific parameters
126 |         cell_index: int = None,
127 |         timeout_seconds: int = 300,
128 |         stream: bool = False,
129 |         progress_interval: int = 5,
130 |         ensure_kernel_alive_fn=None,
131 |         wait_for_kernel_idle_fn=None,
132 |         safe_extract_outputs_fn=None,
133 |         execute_cell_with_forced_sync_fn=None,
134 |         extract_output_fn=None,
135 |         **kwargs
136 |     ) -> List[Union[str, ImageContent]]:
137 |         """Execute a cell with configurable timeout and optional streaming progress updates.
138 | 
139 |         Args:
140 |             mode: Server mode (MCP_SERVER or JUPYTER_SERVER)
141 |             serverapp: ServerApp instance for JUPYTER_SERVER mode
142 |             kernel_manager: Kernel manager for JUPYTER_SERVER mode
143 |             notebook_manager: Notebook manager for MCP_SERVER mode
144 |             cell_index: Index of the cell to execute (0-based)
145 |             timeout_seconds: Maximum time to wait for execution (default: 300s)
146 |             stream: Enable streaming progress updates for long-running cells (default: False)
147 |             progress_interval: Seconds between progress updates when stream=True (default: 5s)
148 |             ensure_kernel_alive_fn: Function to ensure kernel is alive (MCP_SERVER)
149 |             wait_for_kernel_idle_fn: Function to wait for kernel idle state (MCP_SERVER)
150 |             safe_extract_outputs_fn: Function to safely extract outputs (MCP_SERVER)
151 |             execute_cell_with_forced_sync_fn: Function to execute cell with forced sync (MCP_SERVER, stream=False)
152 |             extract_output_fn: Function to extract single output (MCP_SERVER, stream=True)
153 | 
154 |         Returns:
155 |             List of outputs from the executed cell
156 |         """
157 |         if mode == ServerMode.JUPYTER_SERVER:
158 |             # JUPYTER_SERVER mode: Use ExecutionStack with YDoc awareness
159 |             from jupyter_mcp_server.jupyter_extension.context import get_server_context
160 | 
161 |             context = get_server_context()
162 |             serverapp = context.serverapp
163 | 
164 |             if serverapp is None:
165 |                 raise ValueError("serverapp is required for JUPYTER_SERVER mode")
166 |             if kernel_manager is None:
167 |                 raise ValueError("kernel_manager is required for JUPYTER_SERVER mode")
168 | 
169 |             notebook_path, kernel_id = get_current_notebook_context(notebook_manager)
170 | 
171 |             # Check if kernel needs to be started
172 |             if kernel_id is None:
173 |                 # No kernel available - start a new one on demand
174 |                 logger.info("No kernel_id available, starting new kernel for execute_cell")
175 |                 kernel_id = await kernel_manager.start_kernel()
176 | 
177 |                 # Wait a bit for kernel to initialize
178 |                 await asyncio.sleep(1.0)
179 |                 logger.info(f"Kernel {kernel_id} started and initialized")
180 | 
181 |                 # Store the kernel in notebook_manager if available
182 |                 if notebook_manager is not None:
183 |                     kernel_info = {"id": kernel_id}
184 |                     notebook_manager.add_notebook(
185 |                         name=notebook_path,
186 |                         kernel=kernel_info,
187 |                         server_url="local",
188 |                         path=notebook_path
189 |                     )
190 | 
191 |             logger.info(f"Executing cell {cell_index} in JUPYTER_SERVER mode (timeout: {timeout_seconds}s)")
192 | 
193 |             # Resolve to absolute path
194 |             if serverapp and not Path(notebook_path).is_absolute():
195 |                 root_dir = serverapp.root_dir
196 |                 notebook_path = str(Path(root_dir) / notebook_path)
197 | 
198 |             # Get file_id from file_id_manager
199 |             file_id_manager = serverapp.web_app.settings.get("file_id_manager")
200 |             if file_id_manager is None:
201 |                 raise RuntimeError("file_id_manager not available in serverapp")
202 | 
203 |             file_id = file_id_manager.get_id(notebook_path)
204 |             if file_id is None:
205 |                 file_id = file_id_manager.index(notebook_path)
206 | 
207 |             # Try to get YDoc if notebook is open
208 |             ydoc = await self._get_jupyter_ydoc(serverapp, file_id)
209 | 
210 |             if ydoc:
211 |                 # Notebook is open - use YDoc and RTC
212 |                 logger.info(f"Notebook {file_id} is open, using RTC mode")
213 | 
214 |                 if cell_index < 0 or cell_index >= len(ydoc.ycells):
215 |                     raise ValueError(f"Cell index {cell_index} out of range")
216 | 
217 |                 cell_id = ydoc.ycells[cell_index].get("id")
218 |                 cell_source = ydoc.ycells[cell_index].get("source")
219 | 
220 |                 if not cell_source or not cell_source.to_py().strip():
221 |                     return []
222 | 
223 |                 code_to_execute = cell_source.to_py()
224 |                 document_id = f"json:notebook:{file_id}"
225 | 
226 |                 # Execute with RTC metadata - outputs will sync automatically
227 |                 outputs = await execute_via_execution_stack(
228 |                     serverapp=serverapp,
229 |                     kernel_id=kernel_id,
230 |                     code=code_to_execute,
231 |                     document_id=document_id,
232 |                     cell_id=cell_id,
233 |                     timeout=timeout_seconds
234 |                 )
235 | 
236 |                 return safe_extract_outputs(outputs)
237 |             else:
238 |                 # Notebook not open - use file-based approach
239 |                 logger.info(f"Notebook {file_id} not open, using file mode")
240 | 
241 |                 import nbformat
242 |                 with open(notebook_path, 'r', encoding='utf-8') as f:
243 |                     notebook = nbformat.read(f, as_version=4)
244 | 
245 |                 if cell_index < 0 or cell_index >= len(notebook.cells):
246 |                     raise ValueError(f"Cell index {cell_index} out of range")
247 | 
248 |                 cell = notebook.cells[cell_index]
249 |                 if cell.cell_type != 'code':
250 |                     raise ValueError(f"Cell {cell_index} is not a code cell")
251 | 
252 |                 code_to_execute = cell.source
253 |                 if not code_to_execute.strip():
254 |                     return []
255 | 
256 |                 # Execute without RTC metadata
257 |                 outputs = await execute_via_execution_stack(
258 |                     serverapp=serverapp,
259 |                     kernel_id=kernel_id,
260 |                     code=code_to_execute,
261 |                     timeout=timeout_seconds
262 |                 )
263 | 
264 |                 # Write outputs back to file
265 |                 await self._write_outputs_to_cell(notebook_path, cell_index, outputs)
266 | 
267 |                 return safe_extract_outputs(outputs)
268 | 
269 |         elif mode == ServerMode.MCP_SERVER:
270 |             # MCP_SERVER mode: Use WebSocket with configurable execution approach
271 |             if ensure_kernel_alive_fn is None:
272 |                 raise ValueError("ensure_kernel_alive_fn is required for MCP_SERVER mode")
273 |             if wait_for_kernel_idle_fn is None:
274 |                 raise ValueError("wait_for_kernel_idle_fn is required for MCP_SERVER mode")
275 |             if notebook_manager is None:
276 |                 raise ValueError("notebook_manager is required for MCP_SERVER mode")
277 | 
278 |             # Validate function dependencies based on stream mode
279 |             if not stream:
280 |                 if safe_extract_outputs_fn is None:
281 |                     raise ValueError("safe_extract_outputs_fn is required for MCP_SERVER mode when stream=False")
282 |                 if execute_cell_with_forced_sync_fn is None:
283 |                     raise ValueError("execute_cell_with_forced_sync_fn is required for MCP_SERVER mode when stream=False")
284 |             else:
285 |                 if extract_output_fn is None:
286 |                     raise ValueError("extract_output_fn is required for MCP_SERVER mode when stream=True")
287 | 
288 |             kernel = ensure_kernel_alive_fn()
289 |             await wait_for_kernel_idle_fn(kernel, max_wait_seconds=30)
290 | 
291 |             async with notebook_manager.get_current_connection() as notebook:
292 |                 if cell_index < 0 or cell_index >= len(notebook):
293 |                     raise ValueError(f"Cell index {cell_index} out of range")
294 | 
295 |                 if stream:
296 |                     # Streaming mode: Real-time monitoring with progress updates
297 |                     logger.info(f"Executing cell {cell_index} in streaming mode (timeout: {timeout_seconds}s, interval: {progress_interval}s)")
298 | 
299 |                     outputs_log = []
300 | 
301 |                     # Start execution in background
302 |                     execution_task = asyncio.create_task(
303 |                         asyncio.to_thread(notebook.execute_cell, cell_index, kernel)
304 |                     )
305 | 
306 |                     start_time = time.time()
307 |                     last_output_count = 0
308 | 
309 |                     # Monitor progress
310 |                     while not execution_task.done():
311 |                         elapsed = time.time() - start_time
312 | 
313 |                         # Check timeout
314 |                         if elapsed > timeout_seconds:
315 |                             execution_task.cancel()
316 |                             outputs_log.append(f"[TIMEOUT at {elapsed:.1f}s: Cancelling execution]")
317 |                             try:
318 |                                 kernel.interrupt()
319 |                                 outputs_log.append("[Sent interrupt signal to kernel]")
320 |                             except Exception:
321 |                                 pass
322 |                             break
323 | 
324 |                         # Check for new outputs
325 |                         try:
326 |                             current_outputs = notebook[cell_index].get("outputs", [])
327 |                             if len(current_outputs) > last_output_count:
328 |                                 new_outputs = current_outputs[last_output_count:]
329 |                                 for output in new_outputs:
330 |                                     extracted = extract_output_fn(output)
331 |                                     if extracted.strip():
332 |                                         outputs_log.append(f"[{elapsed:.1f}s] {extracted}")
333 |                                 last_output_count = len(current_outputs)
334 | 
335 |                         except Exception as e:
336 |                             outputs_log.append(f"[{elapsed:.1f}s] Error checking outputs: {e}")
337 | 
338 |                         # Progress update
339 |                         if int(elapsed) % progress_interval == 0 and elapsed > 0:
340 |                             outputs_log.append(f"[PROGRESS: {elapsed:.1f}s elapsed, {last_output_count} outputs so far]")
341 | 
342 |                         await asyncio.sleep(1)
343 | 
344 |                     # Get final result
345 |                     if not execution_task.cancelled():
346 |                         try:
347 |                             await execution_task
348 |                             final_outputs = notebook[cell_index].get("outputs", [])
349 |                             outputs_log.append(f"[COMPLETED in {time.time() - start_time:.1f}s]")
350 | 
351 |                             # Add any final outputs not captured during monitoring
352 |                             if len(final_outputs) > last_output_count:
353 |                                 remaining = final_outputs[last_output_count:]
354 |                                 for output in remaining:
355 |                                     extracted = extract_output_fn(output)
356 |                                     if extracted.strip():
357 |                                         outputs_log.append(extracted)
358 | 
359 |                         except Exception as e:
360 |                             outputs_log.append(f"[ERROR: {e}]")
361 | 
362 |                     return outputs_log if outputs_log else ["[No output generated]"]
363 | 
364 |                 else:
365 |                     # Non-streaming mode: Use forced synchronization
366 |                     logger.info(f"Starting execution of cell {cell_index} with {timeout_seconds}s timeout")
367 | 
368 |                     try:
369 |                         # Use the forced sync function
370 |                         await execute_cell_with_forced_sync_fn(notebook, cell_index, kernel, timeout_seconds)
371 | 
372 |                         # Get final outputs
373 |                         outputs = notebook[cell_index].get("outputs", [])
374 |                         result = safe_extract_outputs_fn(outputs)
375 | 
376 |                         logger.info(f"Cell {cell_index} completed successfully with {len(result)} outputs")
377 |                         return result
378 | 
379 |                     except asyncio.TimeoutError as e:
380 |                         logger.error(f"Cell {cell_index} execution timed out: {e}")
381 |                         try:
382 |                             if kernel and hasattr(kernel, 'interrupt'):
383 |                                 kernel.interrupt()
384 |                                 logger.info("Sent interrupt signal to kernel")
385 |                         except Exception as interrupt_err:
386 |                             logger.error(f"Failed to interrupt kernel: {interrupt_err}")
387 | 
388 |                         # Return partial outputs if available
389 |                         try:
390 |                             outputs = notebook[cell_index].get("outputs", [])
391 |                             partial_outputs = safe_extract_outputs_fn(outputs)
392 |                             partial_outputs.append(f"[TIMEOUT ERROR: Execution exceeded {timeout_seconds} seconds]")
393 |                             return partial_outputs
394 |                         except Exception:
395 |                             pass
396 | 
397 |                         return [f"[TIMEOUT ERROR: Cell execution exceeded {timeout_seconds} seconds and was interrupted]"]
398 | 
399 |                     except Exception as e:
400 |                         logger.error(f"Error executing cell {cell_index}: {e}")
401 |                         raise
402 |         else:
403 |             raise ValueError(f"Invalid mode: {mode}")
404 | 
```

--------------------------------------------------------------------------------
/tests/test_common.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright (c) 2023-2024 Datalayer, Inc.
  2 | #
  3 | # BSD 3-Clause License
  4 | 
  5 | """
  6 | Common test infrastructure shared between MCP_SERVER and JUPYTER_SERVER mode tests.
  7 | 
  8 | This module provides:
  9 | - MCPClient: MCP protocol client for remote testing
 10 | - timeout_wrapper: Decorator for timeout handling
 11 | - requires_session: Decorator to check client session connection
 12 | - JUPYTER_TOOLS: List of expected tool names
 13 | - Helper functions for content extraction
 14 | """
 15 | 
 16 | import asyncio
 17 | import functools
 18 | import json
 19 | import logging
 20 | from contextlib import AsyncExitStack
 21 | 
 22 | import pytest
 23 | from mcp import ClientSession, types
 24 | from mcp.client.streamable_http import streamablehttp_client
 25 | 
 26 | 
# Canonical list of tool names the Jupyter MCP server is expected to expose.
# Tests compare the output of `list_tools` against this list, so it must be
# kept in sync with the server's registered tools.
# TODO: could be retrieved from code (inspect)
JUPYTER_TOOLS = [
    # Multi-Notebook Management Tools
    "use_notebook",
    "list_notebooks",
    "restart_notebook",
    "unuse_notebook",
    # Cell Tools
    "insert_cell",
    "insert_execute_code_cell",
    "overwrite_cell_source",
    "execute_cell",
    "read_cells",
    "list_cells",
    "read_cell",
    "delete_cell",
    "execute_ipython",
    # Server/file inspection tools
    "list_files",
    "list_kernels",
    "assign_kernel_to_notebook",
]
 48 | 
 49 | 
 50 | def timeout_wrapper(timeout_seconds=30):
 51 |     """Decorator to add timeout handling to async test functions
 52 |     
 53 |     Windows has known issues with asyncio and network timeouts that can cause 
 54 |     tests to hang indefinitely. This decorator adds a safety timeout specifically
 55 |     for Windows platforms while allowing other platforms to run normally.
 56 |     """
 57 |     def decorator(func):
 58 |         @functools.wraps(func)
 59 |         async def wrapper(*args, **kwargs):
 60 |             try:
 61 |                 return await asyncio.wait_for(func(*args, **kwargs), timeout=timeout_seconds)
 62 |             except asyncio.TimeoutError:
 63 |                 pytest.skip(f"Test {func.__name__} timed out ({timeout_seconds}s) - known platform limitation")
 64 |             except Exception as e:
 65 |                 # Check if it's a network timeout related to Windows
 66 |                 if "ReadTimeout" in str(e) or "TimeoutError" in str(e):
 67 |                     pytest.skip(f"Test {func.__name__} hit network timeout - known platform limitation: {e}")
 68 |                 raise
 69 |         return wrapper
 70 |     return decorator
 71 | 
 72 | 
 73 | def requires_session(func):
 74 |     """
 75 |     A decorator that checks if the instance has a connected session.
 76 |     """
 77 |     @functools.wraps(func)
 78 |     async def wrapper(self, *args, **kwargs):
 79 |         if not self._session:
 80 |             raise RuntimeError("Client session is not connected")
 81 |         # If the session exists, call the original method
 82 |         return await func(self, *args, **kwargs)
 83 |     
 84 |     return wrapper
 85 | 
 86 | 
 87 | class MCPClient:
 88 |     """A standard MCP client used to interact with the Jupyter MCP server
 89 | 
 90 |     Basically it's a client wrapper for the Jupyter MCP server.
 91 |     It uses the `requires_session` decorator to check if the session is connected.
 92 |     """
 93 | 
 94 |     def __init__(self, url):
 95 |         self.url = f"{url}/mcp"
 96 |         self._session: ClientSession | None = None
 97 |         self._exit_stack = AsyncExitStack()
 98 | 
 99 |     async def __aenter__(self):
100 |         """Initiate the session (enter session context)"""
101 |         streams_context = streamablehttp_client(self.url)
102 |         read_stream, write_stream, _ = await self._exit_stack.enter_async_context(
103 |             streams_context
104 |         )
105 |         session_context = ClientSession(read_stream, write_stream)
106 |         self._session = await self._exit_stack.enter_async_context(session_context)
107 |         await self._session.initialize()
108 |         return self
109 | 
110 |     async def __aexit__(self, exc_type, exc_val, exc_tb):
111 |         """Close the session (exit session context)"""
112 |         if self._exit_stack:
113 |             await self._exit_stack.aclose()
114 |         self._session = None
115 | 
116 |     @staticmethod
117 |     def _extract_text_content(result):
118 |         """Extract text content from a result"""
119 |         try:
120 |             logging.debug(f"_extract_text_content: result type={type(result)}, has content={hasattr(result, 'content')}, is tuple={isinstance(result, tuple)}, is list={isinstance(result, list)}")
121 |             
122 |             # Handle tuple results (content, metadata)
123 |             if isinstance(result, tuple) and len(result) >= 2:
124 |                 logging.debug(f"_extract_text_content: handling tuple, first element type={type(result[0])}")
125 |                 result = result[0]  # Get the content list from the tuple
126 |             
127 |             if hasattr(result, 'content') and result.content and len(result.content) > 0:
128 |                 # Check if all items are TextContent
129 |                 if all(isinstance(item, types.TextContent) for item in result.content):
130 |                     # If multiple TextContent items, return as JSON list
131 |                     if len(result.content) > 1:
132 |                         texts = [item.text for item in result.content]
133 |                         import json
134 |                         text = json.dumps(texts)
135 |                         logging.debug(f"_extract_text_content: extracted {len(texts)} TextContent items as JSON list")
136 |                         return text
137 |                     else:
138 |                         text = result.content[0].text
139 |                         logging.debug(f"_extract_text_content: extracted from result.content[0].text, length={len(text)}")
140 |                         return text
141 |             # Handle list results directly
142 |             elif isinstance(result, list) and len(result) > 0:
143 |                 # Check if all items are TextContent
144 |                 if all(isinstance(item, types.TextContent) for item in result):
145 |                     # If multiple TextContent items, return as JSON list
146 |                     if len(result) > 1:
147 |                         texts = [item.text for item in result]
148 |                         import json
149 |                         text = json.dumps(texts)
150 |                         logging.debug(f"_extract_text_content: extracted {len(texts)} TextContent items as JSON list")
151 |                         return text
152 |                     else:
153 |                         text = result[0].text
154 |                         logging.debug(f"_extract_text_content: extracted from list[0].text, length={len(text)}")
155 |                         return text
156 |         except (AttributeError, IndexError, TypeError) as e:
157 |             logging.debug(f"_extract_text_content error: {e}, result type: {type(result)}")
158 |         
159 |         logging.debug(f"_extract_text_content: returning None, could not extract")
160 |         return None
161 | 
    def _get_structured_content_safe(self, result):
        """Safely get structured content with fallback to text content parsing.

        Resolution order:
          1. the result's ``structuredContent`` attribute, when present;
          2. the text content parsed as JSON (wrapped as ``{"result": ...}``
             unless it already looks wrapped or like a direct object);
          3. the text content parsed as a Python literal;
          4. the raw text wrapped as ``{"result": text}``;
          5. image/mixed content converted to a list of dicts and strings;
          6. ``None`` when nothing can be extracted.
        """
        content = getattr(result, 'structuredContent', None)
        if content is None:
            # Try to extract from text content as fallback
            text_content = self._extract_text_content(result)
            logging.debug(f"_get_structured_content_safe: text_content={repr(text_content[:200] if text_content else None)}")
            if text_content:
                # Try to parse as JSON
                try:
                    parsed = json.loads(text_content)
                    logging.debug(f"_get_structured_content_safe: JSON parsed successfully, type={type(parsed)}")
                    # Check if it's already a wrapped result or a direct response object
                    if isinstance(parsed, dict):
                        # If it has "result" key, it's already wrapped
                        if "result" in parsed:
                            return parsed
                        # If it has keys like "index", "type", "source" it's a direct object (like CellInfo)
                        elif any(key in parsed for key in ["index", "type", "source", "cells"]):
                            return parsed
                        # Otherwise wrap it
                        else:
                            return {"result": parsed}
                    else:
                        # Lists, strings, etc. - wrap them
                        return {"result": parsed}
                except json.JSONDecodeError:
                    # Not JSON - could be plain text or list representation
                    # Try to evaluate as Python literal (for lists, etc.)
                    try:
                        import ast
                        parsed = ast.literal_eval(text_content)
                        logging.debug(f"_get_structured_content_safe: ast.literal_eval succeeded, type={type(parsed)}, value={repr(parsed)}")
                        return {"result": parsed}
                    except (ValueError, SyntaxError):
                        # Plain text - return as-is
                        logging.debug(f"_get_structured_content_safe: Plain text, wrapping in result dict")
                        return {"result": text_content}
            else:
                # No text content - check if we have ImageContent or mixed content
                if hasattr(result, 'content') and result.content:
                    # Extract mixed content (ImageContent + TextContent)
                    content_list = []
                    for item in result.content:
                        if isinstance(item, types.ImageContent):
                            # Convert ImageContent to dict format
                            content_list.append({
                                'type': 'image',
                                'data': item.data,
                                'mimeType': item.mimeType,
                                'annotations': getattr(item, 'annotations', None),
                                'meta': getattr(item, 'meta', None)
                            })
                        elif isinstance(item, types.TextContent):
                            # Include text content if present
                            content_list.append(item.text)

                    if content_list:
                        logging.debug(f"_get_structured_content_safe: extracted {len(content_list)} items from mixed content")
                        return {"result": content_list}

                logging.warning(f"No text content available in result: {type(result)}")
                return None
        return content
226 |     
    async def _call_tool_safe(self, tool_name, arguments=None):
        """Safely call a tool, returning None on error (for test compatibility).

        Errors may surface three ways depending on the server mode; all are
        normalized to a ``None`` return:
          * an exception raised by the call itself;
          * error text embedded in the textual result (MCP_SERVER mode);
          * error text embedded in the structured result (JUPYTER_SERVER mode).

        Args:
            tool_name: Name of the MCP tool to invoke.
            arguments: Optional dict of tool arguments (defaults to ``{}``).

        Returns:
            The raw call result, or ``None`` when the call failed or the
            result looks like a wrapped error message.
        """
        try:
            result = await self._session.call_tool(tool_name, arguments=arguments or {})  # type: ignore
            
            # Log raw result for debugging
            logging.debug(f"_call_tool_safe({tool_name}): raw result type={type(result)}")
            logging.debug(f"_call_tool_safe({tool_name}): raw result={result}")
            
            # Check if result contains error text (for MCP_SERVER mode where errors are wrapped in results)
            text_content = self._extract_text_content(result)
            if text_content and ("Error executing tool" in text_content or "is out of range" in text_content or "not found" in text_content):
                logging.warning(f"Tool {tool_name} returned error in result: {text_content[:100]}")
                return None
            
            # Also check structured content for errors (for JUPYTER_SERVER mode)
            structured_content = self._get_structured_content_safe(result)
            if structured_content:
                # Check if result contains error messages
                result_value = structured_content.get("result")
                if result_value:
                    # Handle both string and list results
                    error_text = ""
                    if isinstance(result_value, str):
                        error_text = result_value
                    elif isinstance(result_value, list) and len(result_value) > 0:
                        # Only the first element is sniffed for error markers.
                        error_text = str(result_value[0])
                    
                    if error_text and ("[ERROR:" in error_text or "is out of range" in error_text or "not found" in error_text):
                        logging.warning(f"Tool {tool_name} returned error in structured result: {error_text[:100]}")
                        return None
            
            return result
        except Exception as e:
            # Log the error but return None for test compatibility (JUPYTER_SERVER mode)
            logging.warning(f"Tool {tool_name} raised error: {e}")
            return None
264 | 
265 |     @requires_session
266 |     async def list_tools(self):
267 |         return await self._session.list_tools()  # type: ignore
268 | 
269 |     # Multi-Notebook Management Methods
270 |     @requires_session
271 |     async def use_notebook(self, notebook_name, notebook_path=None, mode="connect", kernel_id=None):
272 |         arguments = {
273 |             "notebook_name": notebook_name, 
274 |             "mode": mode,
275 |             "kernel_id": kernel_id
276 |         }
277 |         # Only add notebook_path if provided (for switching, it's optional)
278 |         if notebook_path is not None:
279 |             arguments["notebook_path"] = notebook_path
280 |         
281 |         result = await self._session.call_tool("use_notebook", arguments=arguments)  # type: ignore
282 |         return self._extract_text_content(result)
283 |     
284 |     @requires_session
285 |     async def list_notebooks(self):
286 |         result = await self._session.call_tool("list_notebooks")  # type: ignore
287 |         return self._extract_text_content(result)
288 |     
289 |     @requires_session
290 |     async def restart_notebook(self, notebook_name):
291 |         result = await self._session.call_tool("restart_notebook", arguments={"notebook_name": notebook_name})  # type: ignore
292 |         return self._extract_text_content(result)
293 |     
294 |     @requires_session
295 |     async def unuse_notebook(self, notebook_name):
296 |         result = await self._session.call_tool("unuse_notebook", arguments={"notebook_name": notebook_name})  # type: ignore
297 |         return self._extract_text_content(result)
298 |     
299 |     @requires_session
300 |     async def insert_cell(self, cell_index, cell_type, cell_source):
301 |         result = await self._call_tool_safe("insert_cell", {"cell_index": cell_index, "cell_type": cell_type, "cell_source": cell_source})
302 |         return self._get_structured_content_safe(result) if result else None
303 | 
304 |     @requires_session
305 |     async def insert_execute_code_cell(self, cell_index, cell_source):
306 |         result = await self._call_tool_safe("insert_execute_code_cell", {"cell_index": cell_index, "cell_source": cell_source})
307 |         structured = self._get_structured_content_safe(result) if result else None
308 |         
309 |         # Special handling for insert_execute_code_cell: tool returns list[str | ImageContent]
310 |         # In JUPYTER_SERVER mode, the list gets flattened to a single string in TextContent
311 |         # In MCP_SERVER mode, it's properly wrapped in structured content as {"result": [...]}
312 |         if structured and "result" in structured:
313 |             result_value = structured["result"]
314 |             # If result is not already a list, wrap it in a list to match the tool's return type
315 |             if not isinstance(result_value, list):
316 |                 # Wrap the single value in a list
317 |                 structured["result"] = [result_value]
318 |         return structured
319 | 
320 |     @requires_session
321 |     async def read_cell(self, cell_index):
322 |         result = await self._call_tool_safe("read_cell", {"cell_index": cell_index})
323 |         return self._get_structured_content_safe(result) if result else None
324 | 
325 |     @requires_session
326 |     async def read_cells(self):
327 |         result = await self._session.call_tool("read_cells")  # type: ignore
328 |         structured = self._get_structured_content_safe(result)
329 |         
330 |         # read_cells returns a list of cell dicts directly
331 |         # If wrapped in {"result": ...}, unwrap it
332 |         if structured and "result" in structured:
333 |             cells_list = structured["result"]
334 |             # If the result is a list of JSON strings, parse each one
335 |             if isinstance(cells_list, list) and len(cells_list) > 0 and isinstance(cells_list[0], str):
336 |                 try:
337 |                     import json
338 |                     cells_list = [json.loads(cell_str) for cell_str in cells_list]
339 |                 except (json.JSONDecodeError, TypeError):
340 |                     pass
341 |             return cells_list
342 |         return structured
343 | 
344 |     @requires_session
345 |     async def list_cells(self, max_retries=3):
346 |         """List cells with retry mechanism for Windows compatibility"""
347 |         for attempt in range(max_retries):
348 |             try:
349 |                 result = await self._session.call_tool("list_cells")  # type: ignore
350 |                 text_result = self._extract_text_content(result)
351 |                 logging.debug(f"list_cells attempt {attempt + 1}: text_result type={type(text_result)}, len={len(text_result) if text_result else 0}")
352 |                 logging.debug(f"list_cells attempt {attempt + 1}: text_result[:500]={repr(text_result[:500]) if text_result else 'None'}")
353 |                 has_index_type = ("Index\tType" in text_result) if text_result else False
354 |                 logging.debug(f"list_cells attempt {attempt + 1}: has_index_type={has_index_type}")
355 |                 if text_result is not None and not text_result.startswith("Error") and "Index\tType" in text_result:
356 |                     return text_result
357 |                 else:
358 |                     logging.warning(f"list_cells returned unexpected result on attempt {attempt + 1}/{max_retries}")
359 |                     if attempt < max_retries - 1:
360 |                         await asyncio.sleep(0.5)
361 |             except Exception as e:
362 |                 logging.error(f"list_cells failed on attempt {attempt + 1}/{max_retries}: {e}")
363 |                 if attempt < max_retries - 1:
364 |                     await asyncio.sleep(0.5)
365 |                 else:
366 |                     logging.error("list_cells failed after all retries")
367 |                     return "Error: Failed to retrieve cell list after all retries"
368 |                     
369 |         return "Error: Failed to retrieve cell list after all retries"
370 | 
371 |     @requires_session
372 |     async def list_kernels(self):
373 |         """List all available kernels"""
374 |         result = await self._session.call_tool("list_kernels")  # type: ignore
375 |         return self._extract_text_content(result)
376 | 
377 |     @requires_session
378 |     async def delete_cell(self, cell_index):
379 |         result = await self._call_tool_safe("delete_cell", {"cell_index": cell_index})
380 |         return self._get_structured_content_safe(result) if result else None
381 | 
382 |     @requires_session
383 |     async def execute_cell_streaming(self, cell_index):
384 |         result = await self._call_tool_safe("execute_cell_streaming", {"cell_index": cell_index})
385 |         return self._get_structured_content_safe(result) if result else None
386 |     
387 |     @requires_session
388 |     async def execute_cell_with_progress(self, cell_index):
389 |         result = await self._call_tool_safe("execute_cell_with_progress", {"cell_index": cell_index})
390 |         structured = self._get_structured_content_safe(result) if result else None
391 |         
392 |         # Handle JUPYTER_SERVER mode flattening list responses to single string
393 |         if structured and "result" in structured:
394 |             result_value = structured["result"]
395 |             if not isinstance(result_value, list):
396 |                 structured["result"] = [result_value]
397 |         return structured
398 | 
399 |     @requires_session
400 |     async def execute_cell(self, cell_index, timeout_seconds=300, stream=False, progress_interval=5):
401 |         result = await self._call_tool_safe("execute_cell", {
402 |             "cell_index": cell_index,
403 |             "timeout_seconds": timeout_seconds,
404 |             "stream": stream,
405 |             "progress_interval": progress_interval
406 |         })
407 |         structured = self._get_structured_content_safe(result) if result else None
408 | 
409 |         # Handle JUPYTER_SERVER mode flattening list responses to single string
410 |         if structured and "result" in structured:
411 |             result_value = structured["result"]
412 |             if not isinstance(result_value, list):
413 |                 structured["result"] = [result_value]
414 |         return structured
415 | 
416 |     @requires_session
417 |     async def overwrite_cell_source(self, cell_index, cell_source):
418 |         result = await self._call_tool_safe("overwrite_cell_source", {"cell_index": cell_index, "cell_source": cell_source})
419 |         return self._get_structured_content_safe(result) if result else None
420 | 
421 |     @requires_session
422 |     async def execute_ipython(self, code, timeout=60):
423 |         result = await self._session.call_tool("execute_ipython", arguments={"code": code, "timeout": timeout})  # type: ignore
424 |         structured = self._get_structured_content_safe(result)
425 |         
426 |         # execute_ipython should always return a list of outputs
427 |         # If we got a plain string, wrap it as a list
428 |         if structured and "result" in structured:
429 |             result_val = structured["result"]
430 |             if isinstance(result_val, str):
431 |                 # Single output string, wrap as list
432 |                 structured["result"] = [result_val]
433 |             elif not isinstance(result_val, list):
434 |                 # Some other type, wrap as list
435 |                 structured["result"] = [result_val]
436 |         
437 |         return structured
438 | 
439 |     @requires_session
440 |     async def append_execute_code_cell(self, cell_source):
441 |         """Append and execute a code cell at the end of the notebook."""
442 |         return await self.insert_execute_code_cell(-1, cell_source)
443 | 
444 |     @requires_session
445 |     async def append_markdown_cell(self, cell_source):
446 |         """Append a markdown cell at the end of the notebook."""
447 |         return await self.insert_cell(-1, "markdown", cell_source)
448 |     
449 |     # Helper method to get cell count from list_cells output
450 |     @requires_session
451 |     async def get_cell_count(self):
452 |         """Get the number of cells by parsing list_cells output"""
453 |         cell_list = await self.list_cells()
454 |         if "Error" in cell_list or "Index\tType" not in cell_list:
455 |             return 0
456 |         lines = cell_list.split('\n')
457 |         data_lines = [line for line in lines if '\t' in line and not line.startswith('Index') and not line.startswith('-')]
458 |         return len(data_lines)
459 | 
```

--------------------------------------------------------------------------------
/tests/test_tools.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright (c) 2023-2024 Datalayer, Inc.
  2 | #
  3 | # BSD 3-Clause License
  4 | 
  5 | """
  6 | Integration tests for Jupyter MCP Server - Both MCP_SERVER and JUPYTER_SERVER modes.
  7 | 
  8 | This test suite validates the Jupyter MCP Server in both deployment modes:
  9 | 
 10 | 1. **MCP_SERVER Mode**: Standalone server using HTTP/WebSocket to Jupyter
 11 | 2. **JUPYTER_SERVER Mode**: Extension with direct serverapp API access
 12 | 
 13 | Tests are parametrized to run against both modes using the same MCPClient,
 14 | ensuring consistent behavior across both deployment patterns.
 15 | 
 16 | Launch the tests:
 17 | ```
 18 | $ pytest tests/test_tools.py -v
 19 | ```
 20 | """
 21 | 
 22 | import logging
 23 | import platform
 24 | from http import HTTPStatus
 25 | 
 26 | import pytest
 27 | import requests
 28 | 
 29 | from .test_common import MCPClient, JUPYTER_TOOLS, timeout_wrapper
 30 | from .conftest import JUPYTER_TOKEN
 31 | 
 32 | 
 33 | ###############################################################################
 34 | # Health Tests
 35 | ###############################################################################
 36 | 
 37 | def test_jupyter_health(jupyter_server):
 38 |     """Test the Jupyter server health"""
 39 |     logging.info(f"Testing service health ({jupyter_server})")
 40 |     response = requests.get(
 41 |         f"{jupyter_server}/api/status",
 42 |         headers={
 43 |             "Authorization": f"token {JUPYTER_TOKEN}",
 44 |         },
 45 |     )
 46 |     assert response.status_code == HTTPStatus.OK
 47 | 
 48 | 
 49 | @pytest.mark.parametrize(
 50 |     "jupyter_mcp_server,kernel_expected_status",
 51 |     [(True, "alive"), (False, "not_initialized")],
 52 |     indirect=["jupyter_mcp_server"],
 53 |     ids=["start_runtime", "no_runtime"],
 54 | )
 55 | def test_mcp_health(jupyter_mcp_server, kernel_expected_status):
 56 |     """Test the MCP Jupyter server health"""
 57 |     logging.info(f"Testing MCP server health ({jupyter_mcp_server})")
 58 |     response = requests.get(f"{jupyter_mcp_server}/api/healthz")
 59 |     assert response.status_code == HTTPStatus.OK
 60 |     data = response.json()
 61 |     logging.debug(data)
 62 |     assert data.get("status") == "healthy"
 63 |     assert data.get("kernel_status") == kernel_expected_status
 64 | 
 65 | 
 66 | @pytest.mark.asyncio
 67 | async def test_mcp_tool_list(mcp_client_parametrized: MCPClient):
 68 |     """Check that the list of tools can be retrieved in both MCP_SERVER and JUPYTER_SERVER modes"""
 69 |     async with mcp_client_parametrized:
 70 |         tools = await mcp_client_parametrized.list_tools()
 71 |     tools_name = [tool.name for tool in tools.tools]
 72 |     logging.debug(f"tools_name: {tools_name}")
 73 |     assert len(tools_name) == len(JUPYTER_TOOLS) and sorted(tools_name) == sorted(
 74 |         JUPYTER_TOOLS
 75 |     )
 76 | 
 77 | 
 78 | @pytest.mark.asyncio
 79 | @timeout_wrapper(30)
 80 | async def test_markdown_cell(mcp_client_parametrized: MCPClient, content="Hello **World** !"):
 81 |     """Test markdown cell manipulation in both MCP_SERVER and JUPYTER_SERVER modes"""
 82 | 
 83 |     async def check_and_delete_markdown_cell(client: MCPClient, index, content):
 84 |         """Check and delete a markdown cell"""
 85 |         # reading and checking the content of the created cell
 86 |         cell_info = await client.read_cell(index)
 87 |         logging.debug(f"cell_info: {cell_info}")
 88 |         assert cell_info["index"] == index
 89 |         assert cell_info["type"] == "markdown"
 90 |         # TODO: don't now if it's normal to get a list of characters instead of a string
 91 |         assert "".join(cell_info["source"]) == content
 92 |         # reading all cells
 93 |         cells_info = await client.read_cells()
 94 |         assert cells_info is not None, "read_cells result should not be None"
 95 |         logging.debug(f"cells_info: {cells_info}")
 96 |         # Check that our cell is in the expected position with correct content
 97 |         assert "".join(cells_info[index]["source"]) == content
 98 |         # delete created cell
 99 |         result = await client.delete_cell(index)
100 |         assert result is not None, "delete_cell result should not be None"
101 |         assert result["result"] == f"Cell {index} (markdown) deleted successfully."
102 | 
103 |     async with mcp_client_parametrized:
104 |         # Get initial cell count
105 |         initial_count = await mcp_client_parametrized.get_cell_count()
106 |         
107 |         # append markdown cell using -1 index
108 |         result = await mcp_client_parametrized.insert_cell(-1, "markdown", content)
109 |         assert result is not None, "insert_cell result should not be None"
110 |         assert "Cell inserted successfully" in result["result"]
111 |         assert f"index {initial_count} (markdown)" in result["result"]
112 |         await check_and_delete_markdown_cell(mcp_client_parametrized, initial_count, content)
113 |         
114 |         # insert markdown cell at the end (safer than index 0)
115 |         result = await mcp_client_parametrized.insert_cell(initial_count, "markdown", content)
116 |         assert result is not None, "insert_cell result should not be None"
117 |         assert "Cell inserted successfully" in result["result"]
118 |         assert f"index {initial_count} (markdown)" in result["result"]
119 |         await check_and_delete_markdown_cell(mcp_client_parametrized, initial_count, content)
120 | 
121 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_code_cell(mcp_client_parametrized: MCPClient, content="1 + 1"):
    """Test code cell manipulation in both MCP_SERVER and JUPYTER_SERVER modes"""
    async def check_and_delete_code_cell(client: MCPClient, index, content):
        """Check and delete a code cell"""
        # reading and checking the content of the created cell
        cell_info = await client.read_cell(index)
        logging.debug(f"cell_info: {cell_info}")
        # Guard against None so a failed read raises a clear assertion
        # instead of a TypeError (consistent with test_markdown_cell).
        assert cell_info is not None, "read_cell result should not be None"
        assert cell_info["index"] == index
        assert cell_info["type"] == "code"
        assert "".join(cell_info["source"]) == content
        # reading all cells
        cells_info = await client.read_cells()
        assert cells_info is not None, "read_cells result should not be None"
        logging.debug(f"cells_info: {cells_info}")
        # read_cells returns the list directly (unwrapped)
        assert "".join(cells_info[index]["source"]) == content
        # delete created cell
        result = await client.delete_cell(index)
        assert result is not None, "delete_cell result should not be None"
        assert result["result"] == f"Cell {index} (code) deleted successfully."

    async with mcp_client_parametrized:
        # Get initial cell count
        initial_count = await mcp_client_parametrized.get_cell_count()

        # append and execute code cell using -1 index
        index = initial_count
        code_result = await mcp_client_parametrized.insert_execute_code_cell(-1, content)
        logging.debug(f"code_result: {code_result}")
        assert code_result is not None, "insert_execute_code_cell result should not be None"
        assert len(code_result["result"]) > 0, "insert_execute_code_cell should return non-empty result"
        # The first output should be the execution result, convert to int for comparison
        first_output = code_result["result"][0]
        first_output_value = int(first_output) if isinstance(first_output, str) else first_output
        assert first_output_value == eval(content), f"Expected {eval(content)}, got {first_output_value}"
        await check_and_delete_code_cell(mcp_client_parametrized, index, content)

        # insert and execute code cell at the end (safer than index 0)
        index = initial_count
        code_result = await mcp_client_parametrized.insert_execute_code_cell(index, content)
        logging.debug(f"code_result: {code_result}")
        assert code_result is not None, "insert_execute_code_cell result should not be None"
        expected_result = eval(content)
        assert int(code_result["result"][0]) == expected_result
        # overwrite content and test different cell execution modes
        content = f"({content}) * 2"
        expected_result = eval(content)
        result = await mcp_client_parametrized.overwrite_cell_source(index, content)
        logging.debug(f"result: {result}")
        assert result is not None, "overwrite_cell_source result should not be None"
        # The server returns a message with diff content
        assert "Cell" in result["result"] and "overwritten successfully" in result["result"]
        assert "diff" in result["result"]  # Should contain diff output
        code_result = await mcp_client_parametrized.execute_cell(index)
        assert code_result is not None, "execute_cell result should not be None"
        assert int(code_result["result"][0]) == expected_result
        await check_and_delete_code_cell(mcp_client_parametrized, index, content)
176 | 
177 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_list_cells(mcp_client_parametrized: MCPClient):
    """Test list_cells functionality in both MCP_SERVER and JUPYTER_SERVER modes"""
    async with mcp_client_parametrized:
        # Test initial list_cells (notebook.ipynb has multiple cells)
        cell_list = await mcp_client_parametrized.list_cells()
        logging.debug(f"Initial cell list: {cell_list}")
        assert isinstance(cell_list, str)

        # Check for error conditions and skip if network issues occur
        if cell_list.startswith("Error executing tool list_cells") or cell_list.startswith("Error: Failed to retrieve"):
            pytest.skip(f"Network timeout occurred during list_cells operation: {cell_list}")

        assert "Index\tType\tCount\tFirst Line" in cell_list
        # The notebook has both markdown and code cells - just verify structure
        lines = cell_list.split('\n')
        data_lines = [line for line in lines if '\t' in line and not line.startswith('Index')]
        assert len(data_lines) >= 1  # Should have at least some cells

        # Add a markdown cell and test again
        markdown_content = "# Test Markdown Cell"
        await mcp_client_parametrized.insert_cell(-1, "markdown", markdown_content)

        # Check list_cells with added markdown cell
        cell_list = await mcp_client_parametrized.list_cells()
        logging.debug(f"Cell list after adding markdown: {cell_list}")
        lines = cell_list.split('\n')

        # Should have header, separator, and multiple data lines
        assert len(lines) >= 4  # header + separator + at least some cells
        assert "Index\tType\tCount\tFirst Line" in lines[0]

        # Check that the added cell is listed
        data_lines = [line for line in lines if '\t' in line and not line.startswith('Index')]
        assert len(data_lines) >= 10  # Should have at least the original 10 cells

        # Check that our added cell appears in the list
        assert any("# Test Markdown Cell" in line for line in data_lines)

        # Add a code cell with long content to test truncation
        # BUG FIX: long_code was previously defined but never inserted (a
        # short "print('Hello World')" cell was inserted instead), so the
        # truncation path was never actually exercised.
        long_code = "# This is a very long comment that should be truncated when displayed in the list because it exceeds the 50 character limit"
        await mcp_client_parametrized.insert_execute_code_cell(-1, long_code)

        # Check list_cells with truncated content
        cell_list = await mcp_client_parametrized.list_cells()
        logging.debug(f"Cell list after adding long code: {cell_list}")
        # The beginning of the long line must remain visible even after truncation.
        assert long_code[:20] in cell_list

        # Clean up by deleting added cells (in reverse order)
        # Get current cell count to determine indices of added cells
        current_count = await mcp_client_parametrized.get_cell_count()
        # Delete the last two cells we added
        await mcp_client_parametrized.delete_cell(current_count - 1)  # Remove the code cell
        await mcp_client_parametrized.delete_cell(current_count - 2)  # Remove the markdown cell
232 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_overwrite_cell_diff(mcp_client_parametrized: MCPClient):
    """Test overwrite_cell_source diff functionality in both MCP_SERVER and JUPYTER_SERVER modes"""

    def response_text(payload):
        """Extract the human-readable message from an overwrite response."""
        return payload.get("result", "") if isinstance(payload, dict) else str(payload)

    async with mcp_client_parametrized:
        initial_count = await mcp_client_parametrized.get_cell_count()

        # Create a code cell we can rewrite.
        initial_content = "x = 10\nprint(x)"
        await mcp_client_parametrized.append_execute_code_cell(initial_content)
        cell_index = initial_count

        # Rewrite it with different content and inspect the diff message.
        new_content = "x = 20\ny = 30\nprint(x + y)"
        result = await mcp_client_parametrized.overwrite_cell_source(cell_index, new_content)
        assert result is not None, "overwrite_cell_source should not return None for valid input"
        result_text = response_text(result)
        assert f"Cell {cell_index} overwritten successfully!" in result_text
        assert "```diff" in result_text
        assert "```" in result_text  # Should have closing diff block
        # The diff must show both removed and added lines.
        assert "-" in result_text
        assert "+" in result_text

        # Overwriting with identical content should report no changes.
        result_no_change = await mcp_client_parametrized.overwrite_cell_source(cell_index, new_content)
        assert result_no_change is not None, "overwrite_cell_source should not return None"
        assert "no changes detected" in response_text(result_no_change)

        # The same workflow applies to markdown cells.
        await mcp_client_parametrized.append_markdown_cell("# Original Title")
        markdown_index = initial_count + 1
        markdown_result = await mcp_client_parametrized.overwrite_cell_source(markdown_index, "# Updated Title\n\nSome content")
        assert markdown_result is not None, "overwrite_cell_source should not return None for markdown cell"
        markdown_text = response_text(markdown_result)
        assert f"Cell {markdown_index} overwritten successfully!" in markdown_text
        assert "```diff" in markdown_text
        assert "Updated Title" in markdown_text

        # Clean up: delete the higher index first so the lower one stays valid.
        await mcp_client_parametrized.delete_cell(markdown_index)
        await mcp_client_parametrized.delete_cell(cell_index)
281 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_bad_index(mcp_client_parametrized: MCPClient, index=99):
    """Test behavior of all index-based tools if the index does not exist in both modes"""
    async with mcp_client_parametrized:
        client = mcp_client_parametrized
        # Every index-based tool must return None for an out-of-range index.
        assert await client.read_cell(index) is None
        assert await client.insert_cell(index, "markdown", "test") is None
        assert await client.insert_execute_code_cell(index, "1 + 1") is None
        assert await client.overwrite_cell_source(index, "1 + 1") is None
        assert await client.execute_cell(index) is None
        assert await client.delete_cell(index) is None
293 | 
294 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_multimodal_output(mcp_client_parametrized: MCPClient):
    """Test multimodal output functionality with image generation in both modes"""

    def looks_like_image(item):
        """Return True when ``item`` plausibly carries PNG image output."""
        if isinstance(item, str):
            # Placeholder text, or an empty stream entry, counts as evidence.
            return (
                "Image Output (PNG)" in item
                or "image display" in item.lower()
                or item.strip() == ''
            )
        if isinstance(item, dict):
            # ImageContent dictionary format (from safe_extract_outputs).
            if (item.get('type') == 'image'
                    and 'data' in item
                    and item.get('mimeType') == 'image/png'):
                logging.info(f"Found ImageContent object with {len(item['data'])} bytes of PNG data")
                return True
            # nbformat output structure (from ExecutionStack).
            if (item.get('output_type') == 'display_data'
                    and 'data' in item
                    and 'image/png' in item['data']):
                png_data = item['data']['image/png']
                logging.info(f"Found nbformat display_data with {len(png_data)} bytes of PNG data")
                return True
            return False
        # An actual ImageContent object exposes data/mimeType attributes.
        if hasattr(item, 'data') and hasattr(item, 'mimeType'):
            return item.mimeType == "image/png"
        return False

    async with mcp_client_parametrized:
        initial_count = await mcp_client_parametrized.get_cell_count()

        # PIL-based image generation keeps the fixture lightweight.
        image_code = """
from PIL import Image, ImageDraw
import io
import base64

# Create a simple test image using PIL
width, height = 200, 100
image = Image.new('RGB', (width, height), color='white')
draw = ImageDraw.Draw(image)

# Draw a simple pattern
draw.rectangle([10, 10, 190, 90], outline='blue', width=2)
draw.ellipse([20, 20, 80, 80], fill='red')
draw.text((100, 40), "Test Image", fill='black')

# Convert to PNG and display
buffer = io.BytesIO()
image.save(buffer, format='PNG')
buffer.seek(0)

# Display the image (this should generate image/png output)
from IPython.display import Image as IPythonImage, display
display(IPythonImage(buffer.getvalue()))
"""

        # Execute the image generation code.
        result = await mcp_client_parametrized.insert_execute_code_cell(-1, image_code)
        cell_index = initial_count

        assert result is not None, "Result should not be None"
        assert "result" in result, "Result should contain 'result' key"
        outputs = result["result"]
        assert isinstance(outputs, list), "Outputs should be a list"

        # At least one output entry must indicate an image (any() short-circuits
        # on the first match, like the original break-on-match loop).
        has_image_output = any(looks_like_image(item) for item in outputs)
        assert has_image_output, f"Expected image output indication, got: {outputs}"

        # ALLOW_IMG_OUTPUT is an environment-level switch in real deployments;
        # here we only verify the output structure is correct.
        logging.info(f"Multimodal test completed with outputs: {outputs}")

        # Clean up: delete the test cell
        await mcp_client_parametrized.delete_cell(cell_index)
381 | 
382 | 
383 | ###############################################################################
384 | # Multi-Notebook Management Tests
385 | ###############################################################################
386 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_multi_notebook_management(mcp_client_parametrized: MCPClient):
    """Test multi-notebook management functionality in both modes"""
    async with mcp_client_parametrized:
        client = mcp_client_parametrized

        # Initial state: may show a default notebook or none at all.
        initial_list = await client.list_notebooks()
        logging.debug(f"Initial notebook list: {initial_list}")

        # Connect a second notebook under an explicit name.
        connect_result = await client.use_notebook("test_notebooks", "new.ipynb", "connect")
        logging.debug(f"Connect result: {connect_result}")
        assert "Successfully using notebook 'test_notebooks'" in connect_result
        assert "new.ipynb" in connect_result

        # The listing should now include it, marked as current.
        notebook_list = await client.list_notebooks()
        logging.debug(f"Notebook list after connect: {notebook_list}")
        assert "test_notebooks" in notebook_list
        assert "new.ipynb" in notebook_list
        assert "✓" in notebook_list  # Should be marked as current

        # Reconnecting under the same name must be rejected.
        duplicate_result = await client.use_notebook("test_notebooks", "new.ipynb")
        assert "already using" in duplicate_result

        # Switch away and back, when a default notebook exists.
        if "default" in notebook_list:
            use_result = await client.use_notebook("default")
            logging.debug(f"Switch to default result: {use_result}")
            assert "Successfully switched to notebook 'default'" in use_result
            use_back_result = await client.use_notebook("test_notebooks")
            assert "Successfully switched to notebook 'test_notebooks'" in use_back_result

        # Cell operations target the currently-used notebook; new.ipynb
        # should already contain some cells.
        cell_count = await client.get_cell_count()
        assert cell_count >= 2, f"new.ipynb should have at least 2 cells, got {cell_count}"

        # Add and run cells on the new notebook.
        test_content = "# Multi-notebook test\nprint('Testing multi-notebook')"
        insert_result = await client.insert_cell(-1, "code", test_content)
        assert "Cell inserted successfully" in insert_result["result"]

        execute_result = await client.insert_execute_code_cell(-1, "2 + 3")
        assert "5" in str(execute_result["result"])

        # Restart, then disconnect.
        restart_result = await client.restart_notebook("test_notebooks")
        logging.debug(f"Restart result: {restart_result}")
        assert "restarted successfully" in restart_result

        disconnect_result = await client.unuse_notebook("test_notebooks")
        logging.debug(f"Unuse result: {disconnect_result}")
        assert "unused successfully" in disconnect_result

        # The notebook must no longer appear in the listing.
        final_list = await client.list_notebooks()
        logging.debug(f"Final notebook list: {final_list}")
        if "No notebooks are currently connected" not in final_list:
            assert "test_notebooks" not in final_list
453 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_multi_notebook_cell_operations(mcp_client_parametrized: MCPClient):
    """Test cell operations across multiple notebooks in both modes"""
    async with mcp_client_parametrized:
        client = mcp_client_parametrized

        # Notebook A: connect and tag it with a recognizable markdown cell.
        await client.use_notebook("notebook_a", "new.ipynb")
        count_a = await client.get_cell_count()
        await client.insert_cell(-1, "markdown", "# This is notebook A")

        try:
            # Notebook B: connect, switch to it, and tag it as well.
            await client.use_notebook("notebook_b", "notebook.ipynb")
            await client.use_notebook("notebook_b")
            count_b = await client.get_cell_count()
            await client.insert_cell(-1, "markdown", "# This is notebook B")

            # Switching back must expose notebook A's content...
            await client.use_notebook("notebook_a")
            cell_list_a = await client.list_cells()
            assert "This is notebook A" in cell_list_a

            # ...and switching forward again must expose notebook B's.
            await client.use_notebook("notebook_b")
            cell_list_b = await client.list_cells()
            assert "This is notebook B" in cell_list_b

            # Clean up - unuse both notebooks
            await client.unuse_notebook("notebook_a")
            await client.unuse_notebook("notebook_b")

        except Exception as e:
            logging.warning(f"Could not test with notebook.ipynb: {e}")
            # Clean up notebook A only
            await client.unuse_notebook("notebook_a")
503 | 
@pytest.mark.asyncio
@timeout_wrapper(30)
async def test_notebooks_error_cases(mcp_client_parametrized: MCPClient):
    """Test error handling for notebook management in both modes"""
    async with mcp_client_parametrized:
        client = mcp_client_parametrized

        # Connecting to a missing file must fail with a clear message.
        missing_file = await client.use_notebook("nonexistent", "nonexistent.ipynb")
        logging.debug(f"Nonexistent notebook result: {missing_file}")
        assert "not found" in missing_file.lower() or "not a valid file" in missing_file.lower()

        # Operations on a name that was never connected must also fail.
        restart_error = await client.restart_notebook("nonexistent_notebook")
        assert "not connected" in restart_error
        disconnect_error = await client.unuse_notebook("nonexistent_notebook")
        assert "not connected" in disconnect_error
        use_error = await client.use_notebook("nonexistent_notebook")
        assert "not connected" in use_error

        # Invalid / traversal-style paths are rejected too.
        invalid_path = await client.use_notebook("test", "../invalid/path.ipynb")
        assert "not found" in invalid_path.lower() or "not a valid file" in invalid_path.lower()
528 | 
529 | ###############################################################################
530 | # execute_ipython Tests
531 | ###############################################################################
532 | 
533 | @pytest.mark.asyncio
534 | @timeout_wrapper(30)
535 | async def test_execute_ipython_python_code(mcp_client_parametrized: MCPClient):
536 |     """Test execute_ipython with basic Python code in both modes"""
537 |     async with mcp_client_parametrized:
538 |         # Test simple Python code
539 |         result = await mcp_client_parametrized.execute_ipython("print('Hello IPython World!')")
540 |         
541 |         # On Windows, if result is None it's likely due to timeout - skip the test
542 |         if platform.system() == "Windows" and result is None:
543 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
544 |         
545 |         assert result is not None, "execute_ipython result should not be None"
546 |         assert "result" in result, "Result should contain 'result' key"
547 |         outputs = result["result"]
548 |         assert isinstance(outputs, list), "Outputs should be a list"
549 |         
550 |         # Check for expected output
551 |         output_text = "".join(str(output) for output in outputs)
552 |         assert "Hello IPython World!" in output_text or "[No output generated]" in output_text
553 |         
554 |         # Test mathematical calculation
555 |         calc_result = await mcp_client_parametrized.execute_ipython("result = 2 ** 10\nprint(f'2^10 = {result}')")
556 |         
557 |         if platform.system() == "Windows" and calc_result is None:
558 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
559 |             
560 |         assert calc_result is not None
561 |         calc_outputs = calc_result["result"]
562 |         calc_text = "".join(str(output) for output in calc_outputs)
563 |         assert "2^10 = 1024" in calc_text or "[No output generated]" in calc_text
564 | 
565 | 
566 | @pytest.mark.asyncio
567 | @timeout_wrapper(30)
568 | async def test_execute_ipython_magic_commands(mcp_client_parametrized: MCPClient):
569 |     """Test execute_ipython with IPython magic commands in both modes"""
570 |     async with mcp_client_parametrized:
571 |         # Test %who magic command (list variables)
572 |         result = await mcp_client_parametrized.execute_ipython("%who")
573 |         
574 |         # On Windows, if result is None it's likely due to timeout - skip the test
575 |         if platform.system() == "Windows" and result is None:
576 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
577 |         
578 |         assert result is not None, "execute_ipython result should not be None"
579 |         outputs = result["result"]
580 |         assert isinstance(outputs, list), "Outputs should be a list"
581 |         
582 |         # Set a variable first, then use %who to see it
583 |         var_result = await mcp_client_parametrized.execute_ipython("test_var = 42")
584 |         if platform.system() == "Windows" and var_result is None:
585 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
586 |             
587 |         who_result = await mcp_client_parametrized.execute_ipython("%who")
588 |         if platform.system() == "Windows" and who_result is None:
589 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
590 |             
591 |         who_outputs = who_result["result"]
592 |         who_text = "".join(str(output) for output in who_outputs)
593 |         # %who should show our variable (or no output if variables exist but aren't shown)
594 |         # This test mainly ensures %who doesn't crash
595 |         
596 |         # Test %timeit magic command
597 |         timeit_result = await mcp_client_parametrized.execute_ipython("%timeit sum(range(100))")
598 |         if platform.system() == "Windows" and timeit_result is None:
599 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
600 |             
601 |         assert timeit_result is not None
602 |         timeit_outputs = timeit_result["result"]
603 |         timeit_text = "".join(str(output) for output in timeit_outputs)
604 |         # timeit should produce some timing output or complete without error
605 |         assert len(timeit_text) >= 0  # Just ensure no crash
606 | 
607 | 
608 | @pytest.mark.asyncio 
609 | @timeout_wrapper(30)
610 | async def test_execute_ipython_shell_commands(mcp_client_parametrized: MCPClient):
611 |     """Test execute_ipython with shell commands in both modes"""
612 |     async with mcp_client_parametrized:
613 |         # Test basic shell command - echo (works on most systems)
614 |         result = await mcp_client_parametrized.execute_ipython("!echo 'Hello from shell'")
615 |         
616 |         # On Windows, if result is None it's likely due to timeout - skip the test
617 |         if platform.system() == "Windows" and result is None:
618 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
619 |         
620 |         assert result is not None, "execute_ipython result should not be None"
621 |         outputs = result["result"]
622 |         assert isinstance(outputs, list), "Outputs should be a list"
623 |         
624 |         output_text = "".join(str(output) for output in outputs)
625 |         # Shell command should either work or be handled gracefully
626 |         assert len(output_text) >= 0  # Just ensure no crash
627 |         
628 |         # Test Python version check
629 |         python_result = await mcp_client_parametrized.execute_ipython("!python --version")
630 |         if platform.system() == "Windows" and python_result is None:
631 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
632 |             
633 |         assert python_result is not None
634 |         python_outputs = python_result["result"]
635 |         python_text = "".join(str(output) for output in python_outputs)
636 |         # Should show Python version or complete without error
637 |         assert len(python_text) >= 0
638 | 
639 | 
640 | @pytest.mark.asyncio
641 | @timeout_wrapper(30)
642 | async def test_execute_ipython_timeout(mcp_client_parametrized: MCPClient):
643 |     """Test execute_ipython timeout functionality in both modes"""
644 |     async with mcp_client_parametrized:
645 |         # Test with very short timeout on a potentially long-running command
646 |         result = await mcp_client_parametrized.execute_ipython("import time; time.sleep(5)", timeout=2)
647 |         
648 |         # On Windows, if result is None it's likely due to timeout - skip the test
649 |         if platform.system() == "Windows" and result is None:
650 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
651 |         
652 |         assert result is not None
653 |         outputs = result["result"]
654 |         output_text = "".join(str(output) for output in outputs)
655 |         # Should either complete quickly or timeout
656 |         assert "TIMEOUT ERROR" in output_text or len(output_text) >= 0
657 | 
658 | 
659 | @pytest.mark.asyncio
660 | @timeout_wrapper(30)
661 | async def test_execute_ipython_error_handling(mcp_client_parametrized: MCPClient):
662 |     """Test execute_ipython error handling in both modes"""
663 |     async with mcp_client_parametrized:
664 |         # Test syntax error
665 |         result = await mcp_client_parametrized.execute_ipython("invalid python syntax <<<")
666 |         
667 |         # On Windows, if result is None it's likely due to timeout - skip the test
668 |         if platform.system() == "Windows" and result is None:
669 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
670 |         
671 |         assert result is not None
672 |         outputs = result["result"]
673 |         output_text = "".join(str(output) for output in outputs)
674 |         # Should handle the error gracefully
675 |         assert len(output_text) >= 0  # Ensure no crash
676 |         
677 |         # Test runtime error  
678 |         runtime_result = await mcp_client_parametrized.execute_ipython("undefined_variable")
679 |         if platform.system() == "Windows" and runtime_result is None:
680 |             pytest.skip("execute_ipython timed out on Windows - known platform limitation")
681 |             
682 |         assert runtime_result is not None
683 |         runtime_outputs = runtime_result["result"]
684 |         runtime_text = "".join(str(output) for output in runtime_outputs)
685 |         # Should handle the error gracefully
686 |         assert len(runtime_text) >= 0
```

--------------------------------------------------------------------------------
/jupyter_mcp_server/utils.py:
--------------------------------------------------------------------------------

```python
   1 | # Copyright (c) 2023-2024 Datalayer, Inc.
   2 | #
   3 | # BSD 3-Clause License
   4 | 
   5 | import re
   6 | from typing import Any, Union
   7 | from mcp.types import ImageContent
   8 | from .env import ALLOW_IMG_OUTPUT
   9 | 
  10 | 
  11 | def get_current_notebook_context(notebook_manager=None):
  12 |     """
  13 |     Get the current notebook path and kernel ID for JUPYTER_SERVER mode.
  14 |     
  15 |     Args:
  16 |         notebook_manager: NotebookManager instance (optional)
  17 |         
  18 |     Returns:
  19 |         Tuple of (notebook_path, kernel_id)
  20 |         Falls back to config values if notebook_manager not provided
  21 |     """
  22 |     from .config import get_config
  23 |     
  24 |     notebook_path = None
  25 |     kernel_id = None
  26 |     
  27 |     if notebook_manager:
  28 |         # Try to get current notebook info from manager
  29 |         notebook_path = notebook_manager.get_current_notebook_path()
  30 |         current_notebook = notebook_manager.get_current_notebook() or "default"
  31 |         kernel_id = notebook_manager.get_kernel_id(current_notebook)
  32 |     
  33 |     # Fallback to config if not found in manager
  34 |     if not notebook_path or not kernel_id:
  35 |         config = get_config()
  36 |         if not notebook_path:
  37 |             notebook_path = config.document_id
  38 |         if not kernel_id:
  39 |             kernel_id = config.runtime_id
  40 |     
  41 |     return notebook_path, kernel_id
  42 | 
  43 | 
  44 | def extract_output(output: Union[dict, Any]) -> Union[str, ImageContent]:
  45 |     """
  46 |     Extracts readable output from a Jupyter cell output dictionary.
  47 |     Handles both traditional and CRDT-based Jupyter formats.
  48 | 
  49 |     Args:
  50 |         output: The output from a Jupyter cell (dict or CRDT object).
  51 | 
  52 |     Returns:
  53 |         str: A string representation of the output.
  54 |     """
  55 |     # Handle pycrdt._text.Text objects
  56 |     if hasattr(output, 'source'):
  57 |         return str(output.source)
  58 |     
  59 |     # Handle CRDT YText objects
  60 |     if hasattr(output, '__str__') and 'Text' in str(type(output)):
  61 |         text_content = str(output)
  62 |         return strip_ansi_codes(text_content)
  63 |     
  64 |     # Handle lists (common in error tracebacks)
  65 |     if isinstance(output, list):
  66 |         return '\n'.join(extract_output(item) for item in output)
  67 |     
  68 |     # Handle traditional dictionary format
  69 |     if not isinstance(output, dict):
  70 |         return strip_ansi_codes(str(output))
  71 |     
  72 |     output_type = output.get("output_type")
  73 |     
  74 |     if output_type == "stream":
  75 |         text = output.get("text", "")
  76 |         if isinstance(text, list):
  77 |             text = ''.join(text)
  78 |         elif hasattr(text, 'source'):
  79 |             text = str(text.source)
  80 |         return strip_ansi_codes(str(text))
  81 |     
  82 |     elif output_type in ["display_data", "execute_result"]:
  83 |         data = output.get("data", {})
  84 |         if "image/png" in data:
  85 |             if ALLOW_IMG_OUTPUT:
  86 |                 try:
  87 |                     return ImageContent(type="image", data=data["image/png"], mimeType="image/png")
  88 |                 except Exception:
  89 |                     # Fallback to text placeholder on error
  90 |                     return "[Image Output (PNG) - Error processing image]"
  91 |             else:
  92 |                 return "[Image Output (PNG) - Image display disabled]"
  93 |         if "text/plain" in data:
  94 |             plain_text = data["text/plain"]
  95 |             if hasattr(plain_text, 'source'):
  96 |                 plain_text = str(plain_text.source)
  97 |             return strip_ansi_codes(str(plain_text))
  98 |         elif "text/html" in data:
  99 |             return "[HTML Output]"
 100 |         else:
 101 |             return f"[{output_type} Data: keys={list(data.keys())}]"
 102 |     
 103 |     elif output_type == "error":
 104 |         traceback = output.get("traceback", [])
 105 |         if isinstance(traceback, list):
 106 |             clean_traceback = []
 107 |             for line in traceback:
 108 |                 if hasattr(line, 'source'):
 109 |                     line = str(line.source)
 110 |                 clean_traceback.append(strip_ansi_codes(str(line)))
 111 |             return '\n'.join(clean_traceback)
 112 |         else:
 113 |             if hasattr(traceback, 'source'):
 114 |                 traceback = str(traceback.source)
 115 |             return strip_ansi_codes(str(traceback))
 116 |     
 117 |     else:
 118 |         return f"[Unknown output type: {output_type}]"
 119 | 
 120 | 
 121 | def strip_ansi_codes(text: str) -> str:
 122 |     """Remove ANSI escape sequences from text."""
 123 |     ansi_escape = re.compile(r'\x1b\[[0-9;]*m')
 124 |     return ansi_escape.sub('', text)
 125 | 
 126 | 
 127 | def _clean_notebook_outputs(notebook):
 128 |     """Remove transient fields from all cell outputs.
 129 |     
 130 |     The 'transient' field is part of the Jupyter kernel messaging protocol
 131 |     but is NOT part of the nbformat schema. This causes validation errors.
 132 |     
 133 |     Args:
 134 |         notebook: nbformat notebook object to clean (modified in place)
 135 |     """
 136 |     for cell in notebook.cells:
 137 |         if cell.cell_type == 'code' and hasattr(cell, 'outputs'):
 138 |             for output in cell.outputs:
 139 |                 if isinstance(output, dict) and 'transient' in output:
 140 |                     del output['transient']
 141 | 
 142 | 
 143 | def safe_extract_outputs(outputs: Any) -> list[Union[str, ImageContent]]:
 144 |     """
 145 |     Safely extract all outputs from a cell, handling CRDT structures.
 146 |     
 147 |     Args:
 148 |         outputs: Cell outputs (could be CRDT YArray or traditional list)
 149 |         
 150 |     Returns:
 151 |         list[Union[str, ImageContent]]: List of outputs (strings or image content)
 152 |     """
 153 |     if not outputs:
 154 |         return []
 155 |     
 156 |     result = []
 157 |     
 158 |     # Handle CRDT YArray or list of outputs
 159 |     if hasattr(outputs, '__iter__') and not isinstance(outputs, (str, dict)):
 160 |         try:
 161 |             for output in outputs:
 162 |                 extracted = extract_output(output)
 163 |                 if extracted:
 164 |                     result.append(extracted)
 165 |         except Exception as e:
 166 |             result.append(f"[Error extracting output: {str(e)}]")
 167 |     else:
 168 |         # Handle single output
 169 |         extracted = extract_output(outputs)
 170 |         if extracted:
 171 |             result.append(extracted)
 172 |     
 173 |     return result
 174 | 
 175 | def normalize_cell_source(source: Any) -> list[str]:
 176 |     """
 177 |     Normalize cell source to a list of strings (lines).
 178 |     
 179 |     In Jupyter notebooks, source can be either:
 180 |     - A string (single or multi-line with \n)  
 181 |     - A list of strings (each element is a line)
 182 |     - CRDT text objects
 183 |     
 184 |     Args:
 185 |         source: The source from a Jupyter cell
 186 |         
 187 |     Returns:
 188 |         list[str]: List of source lines
 189 |     """
 190 |     if not source:
 191 |         return []
 192 |     
 193 |     # Handle CRDT text objects
 194 |     if hasattr(source, 'source'):
 195 |         source = str(source.source)
 196 |     elif hasattr(source, '__str__') and 'Text' in str(type(source)):
 197 |         source = str(source)
 198 |     
 199 |     # If it's already a list, return as is
 200 |     if isinstance(source, list):
 201 |         return [str(line) for line in source]
 202 |     
 203 |     # If it's a string, split by newlines
 204 |     if isinstance(source, str):
 205 |         # Split by newlines but preserve the newline characters except for the last line
 206 |         lines = source.splitlines(keepends=True)
 207 |         # Remove trailing newline from the last line if present
 208 |         if lines and lines[-1].endswith('\n'):
 209 |             lines[-1] = lines[-1][:-1]
 210 |         return lines
 211 |     
 212 |     # Fallback: convert to string and split
 213 |     return str(source).splitlines(keepends=True)
 214 | 
 215 | def format_TSV(headers: list[str], rows: list[list[str]]) -> str:
 216 |     """
 217 |     Format data as TSV (Tab-Separated Values)
 218 |     
 219 |     Args:
 220 |         headers: The list of headers
 221 |         rows: The list of data rows, each row is a list of strings
 222 |     
 223 |     Returns:
 224 |         The formatted TSV string
 225 |     """
 226 |     if not headers or not rows:
 227 |         return "No data to display"
 228 |     
 229 |     result = []
 230 |     
 231 |     header_row = "\t".join(headers)
 232 |     result.append(header_row)
 233 |     
 234 |     for row in rows:
 235 |         data_row = "\t".join(str(cell) for cell in row)
 236 |         result.append(data_row)
 237 |     
 238 |     return "\n".join(result)
 239 | 
 240 | def get_surrounding_cells_info(notebook, cell_index: int, total_cells: int) -> str:
 241 |     """Get information about surrounding cells for context."""
 242 |     start_index = max(0, cell_index - 5)
 243 |     end_index = min(total_cells, cell_index + 6)
 244 |     
 245 |     if total_cells == 0:
 246 |         return "Notebook is now empty, no cells remaining"
 247 |     
 248 |     headers = ["Index", "Type", "Count", "First Line"]
 249 |     rows = []
 250 |     
 251 |     for i in range(start_index, end_index):
 252 |         if i >= total_cells:
 253 |             break
 254 |             
 255 |         cell_data = notebook[i]
 256 |         cell_type = cell_data.get("cell_type", "unknown")
 257 |         
 258 |         execution_count = (cell_data.get("execution_count") or "None") if cell_type == "code" else "N/A"
 259 |         # Get first line of source
 260 |         source_lines = normalize_cell_source(cell_data.get("source", ""))
 261 |         first_line = source_lines[0] if source_lines else ""
 262 |         # Mark the target cell
 263 |         marker = " <-- NEW" if i == cell_index else ""
 264 |         rows.append([i, cell_type, execution_count, first_line+marker])
 265 |     
 266 |     return format_TSV(headers, rows)
 267 | 
 268 | 
 269 | ###############################################################################
 270 | # Kernel and notebook operation helpers
 271 | ###############################################################################
 272 | 
 273 | 
 274 | def create_kernel(config, logger):
 275 |     """Create a new kernel instance using current configuration."""
 276 |     from jupyter_kernel_client import KernelClient
 277 |     kernel = None
 278 |     try:
 279 |         # Initialize the kernel client with the provided parameters.
 280 |         kernel = KernelClient(
 281 |             server_url=config.runtime_url, 
 282 |             token=config.runtime_token, 
 283 |             kernel_id=config.runtime_id
 284 |         )
 285 |         kernel.start()
 286 |         logger.info("Kernel created and started successfully")
 287 |         return kernel
 288 |     except Exception as e:
 289 |         logger.error(f"Failed to create kernel: {e}")
 290 |         # Clean up partially initialized kernel to prevent __del__ errors
 291 |         if kernel is not None:
 292 |             try:
 293 |                 # Try to clean up the kernel object if it exists
 294 |                 if hasattr(kernel, 'stop'):
 295 |                     kernel.stop()
 296 |             except Exception as cleanup_error:
 297 |                 logger.debug(f"Error during kernel cleanup: {cleanup_error}")
 298 |         raise
 299 | 
 300 | 
 301 | def start_kernel(notebook_manager, config, logger):
 302 |     """Start the Jupyter kernel with error handling (for backward compatibility)."""
 303 |     try:
 304 |         # Remove existing default notebook if any
 305 |         if "default" in notebook_manager:
 306 |             notebook_manager.remove_notebook("default")
 307 |         
 308 |         # Create and set up new kernel
 309 |         kernel = create_kernel(config, logger)
 310 |         notebook_manager.add_notebook("default", kernel)
 311 |         logger.info("Default notebook kernel started successfully")
 312 |     except Exception as e:
 313 |         logger.error(f"Failed to start kernel: {e}")
 314 |         raise
 315 | 
 316 | 
 317 | def ensure_kernel_alive(notebook_manager, current_notebook, create_kernel_fn):
 318 |     """Ensure kernel is running, restart if needed."""
 319 |     return notebook_manager.ensure_kernel_alive(current_notebook, create_kernel_fn)
 320 | 
 321 | 
 322 | async def execute_cell_with_timeout(notebook, cell_index, kernel, timeout_seconds, logger):
 323 |     """Execute a cell with timeout and real-time output sync."""
 324 |     import asyncio
 325 |     import time
 326 |     from concurrent.futures import ThreadPoolExecutor
 327 |     
 328 |     start_time = time.time()
 329 |     
 330 |     def _execute_sync():
 331 |         return notebook.execute_cell(cell_index, kernel)
 332 |     
 333 |     executor = ThreadPoolExecutor(max_workers=1)
 334 |     try:
 335 |         future = executor.submit(_execute_sync)
 336 |         
 337 |         while not future.done():
 338 |             elapsed = time.time() - start_time
 339 |             if elapsed > timeout_seconds:
 340 |                 future.cancel()
 341 |                 raise asyncio.TimeoutError(f"Cell execution timed out after {timeout_seconds} seconds")
 342 |             
 343 |             await asyncio.sleep(2)
 344 |             try:
 345 |                 # Try to force document sync using the correct method
 346 |                 ydoc = notebook._doc
 347 |                 if hasattr(ydoc, 'flush') and callable(ydoc.flush):
 348 |                     ydoc.flush()  # Flush pending changes
 349 |                 elif hasattr(notebook, '_websocket') and notebook._websocket:
 350 |                     # Force a small update to trigger sync
 351 |                     pass  # The websocket should auto-sync
 352 |                 
 353 |                 if cell_index < len(ydoc._ycells):
 354 |                     outputs = ydoc._ycells[cell_index].get("outputs", [])
 355 |                     if outputs:
 356 |                         logger.info(f"Cell {cell_index} executing... ({elapsed:.1f}s) - {len(outputs)} outputs so far")
 357 |             except Exception as e:
 358 |                 logger.debug(f"Sync attempt failed: {e}")
 359 |                 pass
 360 |         
 361 |         result = future.result()
 362 |         return result
 363 |         
 364 |     finally:
 365 |         executor.shutdown(wait=False)
 366 | 
 367 | 
 368 | async def execute_cell_with_forced_sync(notebook, cell_index, kernel, timeout_seconds, logger):
 369 |     """Execute cell with forced real-time synchronization."""
 370 |     import asyncio
 371 |     import time
 372 |     
 373 |     start_time = time.time()
 374 |     
 375 |     # Start execution
 376 |     execution_future = asyncio.create_task(
 377 |         asyncio.to_thread(notebook.execute_cell, cell_index, kernel)
 378 |     )
 379 |     
 380 |     last_output_count = 0
 381 |     
 382 |     while not execution_future.done():
 383 |         elapsed = time.time() - start_time
 384 |         
 385 |         if elapsed > timeout_seconds:
 386 |             execution_future.cancel()
 387 |             try:
 388 |                 if hasattr(kernel, 'interrupt'):
 389 |                     kernel.interrupt()
 390 |             except Exception:
 391 |                 pass
 392 |             raise asyncio.TimeoutError(f"Cell execution timed out after {timeout_seconds} seconds")
 393 |         
 394 |         # Check for new outputs and try to trigger sync
 395 |         try:
 396 |             ydoc = notebook._doc
 397 |             current_outputs = ydoc._ycells[cell_index].get("outputs", [])
 398 |             
 399 |             if len(current_outputs) > last_output_count:
 400 |                 last_output_count = len(current_outputs)
 401 |                 logger.info(f"Cell {cell_index} progress: {len(current_outputs)} outputs after {elapsed:.1f}s")
 402 |                 
 403 |                 # Try different sync methods
 404 |                 try:
 405 |                     # Method 1: Force Y-doc update
 406 |                     if hasattr(ydoc, 'observe') and hasattr(ydoc, 'unobserve'):
 407 |                         # Trigger observers by making a tiny change
 408 |                         pass
 409 |                         
 410 |                     # Method 2: Force websocket message
 411 |                     if hasattr(notebook, '_websocket') and notebook._websocket:
 412 |                         # The websocket should automatically sync on changes
 413 |                         pass
 414 |                         
 415 |                 except Exception as sync_error:
 416 |                     logger.debug(f"Sync method failed: {sync_error}")
 417 |                     
 418 |         except Exception as e:
 419 |             logger.debug(f"Output check failed: {e}")
 420 |         
 421 |         await asyncio.sleep(1)  # Check every second
 422 |     
 423 |     # Get final result
 424 |     try:
 425 |         await execution_future
 426 |     except asyncio.CancelledError:
 427 |         pass
 428 |     
 429 |     return None
 430 | 
 431 | 
 432 | def is_kernel_busy(kernel):
 433 |     """Check if kernel is currently executing something."""
 434 |     try:
 435 |         # This is a simple check - you might need to adapt based on your kernel client
 436 |         if hasattr(kernel, '_client') and hasattr(kernel._client, 'is_alive'):
 437 |             return kernel._client.is_alive()
 438 |         return False
 439 |     except Exception:
 440 |         return False
 441 | 
 442 | 
 443 | async def wait_for_kernel_idle(kernel, logger, max_wait_seconds=60):
 444 |     """Wait for kernel to become idle before proceeding."""
 445 |     import asyncio
 446 |     import time
 447 |     
 448 |     start_time = time.time()
 449 |     while is_kernel_busy(kernel):
 450 |         elapsed = time.time() - start_time
 451 |         if elapsed > max_wait_seconds:
 452 |             logger.warning(f"Kernel still busy after {max_wait_seconds}s, proceeding anyway")
 453 |             break
 454 |         logger.info(f"Waiting for kernel to become idle... ({elapsed:.1f}s)")
 455 |         await asyncio.sleep(1)
 456 | 
 457 | 
 458 | async def safe_notebook_operation(operation_func, logger, max_retries=3):
 459 |     """Safely execute notebook operations with connection recovery."""
 460 |     import asyncio
 461 |     
 462 |     for attempt in range(max_retries):
 463 |         try:
 464 |             return await operation_func()
 465 |         except Exception as e:
 466 |             error_msg = str(e).lower()
 467 |             if any(err in error_msg for err in ["websocketclosederror", "connection is already closed", "connection closed"]):
 468 |                 if attempt < max_retries - 1:
 469 |                     logger.warning(f"Connection lost, retrying... (attempt {attempt + 1}/{max_retries})")
 470 |                     await asyncio.sleep(1 + attempt)  # Increasing delay
 471 |                     continue
 472 |                 else:
 473 |                     logger.error(f"Failed after {max_retries} attempts: {e}")
 474 |                     raise Exception(f"Connection failed after {max_retries} retries: {e}")
 475 |             else:
 476 |                 # Non-connection error, don't retry
 477 |                 raise e
 478 |     
 479 |     raise Exception("Unexpected error in retry logic")
 480 | 
 481 | 
 482 | def list_files_recursively(server_client, current_path="", current_depth=0, files=None, max_depth=3):
 483 |     """Recursively list all files and directories in the Jupyter server."""
 484 |     if files is None:
 485 |         files = []
 486 |     
 487 |     # Stop if we've reached max depth
 488 |     if current_depth > max_depth:
 489 |         return files
 490 |     
 491 |     try:
 492 |         contents = server_client.contents.list_directory(current_path)
 493 |         for item in contents:
 494 |             full_path = f"{current_path}/{item.name}" if current_path else item.name
 495 |             
 496 |             # Format size
 497 |             size_str = ""
 498 |             if hasattr(item, 'size') and item.size is not None:
 499 |                 if item.size < 1024:
 500 |                     size_str = f"{item.size}B"
 501 |                 elif item.size < 1024 * 1024:
 502 |                     size_str = f"{item.size // 1024}KB"
 503 |                 else:
 504 |                     size_str = f"{item.size // (1024 * 1024)}MB"
 505 |             
 506 |             # Format last modified
 507 |             last_modified = ""
 508 |             if hasattr(item, 'last_modified') and item.last_modified:
 509 |                 last_modified = item.last_modified.strftime("%Y-%m-%d %H:%M:%S")
 510 |             
 511 |             # Add file/directory to list
 512 |             files.append({
 513 |                 'path': full_path,
 514 |                 'type': item.type,
 515 |                 'size': size_str,
 516 |                 'last_modified': last_modified
 517 |             })
 518 |             
 519 |             # Recursively explore directories
 520 |             if item.type == "directory":
 521 |                 list_files_recursively(server_client, full_path, current_depth + 1, files, max_depth)
 522 |                 
 523 |     except Exception as e:
 524 |         # If we can't access a directory, add an error entry
 525 |         files.append({
 526 |             'path': current_path or "root",
 527 |             'type': "error",
 528 |             'size': "",
 529 |             'last_modified': f"Error: {str(e)}"
 530 |         })
 531 |     
 532 |     return files
 533 | 
 534 | 
 535 | ###############################################################################
 536 | # Local code execution helpers (JUPYTER_SERVER mode)
 537 | ###############################################################################
 538 | 
 539 | 
 540 | async def execute_via_execution_stack(
 541 |     serverapp: Any,
 542 |     kernel_id: str,
 543 |     code: str,
 544 |     document_id: str = None,
 545 |     cell_id: str = None,
 546 |     timeout: int = 300,
 547 |     poll_interval: float = 0.1,
 548 |     logger = None
 549 | ) -> list[Union[str, ImageContent]]:
 550 |     """Execute code using ExecutionStack (JUPYTER_SERVER mode with jupyter-server-nbmodel).
 551 |     
 552 |     This uses the ExecutionStack from jupyter-server-nbmodel extension directly,
 553 |     avoiding the reentrant HTTP call issue. This is the preferred method for code
 554 |     execution in JUPYTER_SERVER mode.
 555 |     
 556 |     Args:
 557 |         serverapp: Jupyter server application instance
 558 |         kernel_id: Kernel ID to execute in
 559 |         code: Code to execute
 560 |         document_id: Optional document ID for RTC integration (format: json:notebook:<file_id>)
 561 |         cell_id: Optional cell ID for RTC integration
 562 |         timeout: Maximum time to wait for execution (seconds)
 563 |         poll_interval: Time between polling for results (seconds)
 564 |         logger: Logger instance (optional)
 565 |         
 566 |     Returns:
 567 |         List of formatted outputs (strings or ImageContent)
 568 |         
 569 |     Raises:
 570 |         RuntimeError: If jupyter-server-nbmodel extension is not installed
 571 |         TimeoutError: If execution exceeds timeout
 572 |     """
 573 |     import asyncio
 574 |     import logging as default_logging
 575 |     
 576 |     if logger is None:
 577 |         logger = default_logging.getLogger(__name__)
 578 |     
 579 |     try:
 580 |         # Get the ExecutionStack from the jupyter_server_nbmodel extension
 581 |         nbmodel_extensions = serverapp.extension_manager.extension_apps.get("jupyter_server_nbmodel", set())
 582 |         if not nbmodel_extensions:
 583 |             raise RuntimeError("jupyter_server_nbmodel extension not found. Please install it.")
 584 |         
 585 |         nbmodel_ext = next(iter(nbmodel_extensions))
 586 |         execution_stack = nbmodel_ext._Extension__execution_stack
 587 |         
 588 |         # Build metadata for RTC integration if available
 589 |         metadata = {}
 590 |         if document_id and cell_id:
 591 |             metadata = {
 592 |                 "document_id": document_id,
 593 |                 "cell_id": cell_id
 594 |             }
 595 |         
 596 |         # Submit execution request
 597 |         logger.info(f"Submitting execution request to kernel {kernel_id}")
 598 |         request_id = execution_stack.put(kernel_id, code, metadata)
 599 |         logger.info(f"Execution request {request_id} submitted")
 600 |         
 601 |         # Poll for results
 602 |         start_time = asyncio.get_event_loop().time()
 603 |         while True:
 604 |             elapsed = asyncio.get_event_loop().time() - start_time
 605 |             if elapsed > timeout:
 606 |                 raise TimeoutError(f"Execution timed out after {timeout} seconds")
 607 |             
 608 |             # Get result (returns None if pending, result dict if complete)
 609 |             result = execution_stack.get(kernel_id, request_id)
 610 |             
 611 |             if result is not None:
 612 |                 # Execution complete
 613 |                 logger.info(f"Execution request {request_id} completed")
 614 |                 
 615 |                 # Check for errors
 616 |                 if "error" in result:
 617 |                     error_info = result["error"]
 618 |                     logger.error(f"Execution error: {error_info}")
 619 |                     return [f"[ERROR: {error_info.get('ename', 'Unknown')}: {error_info.get('evalue', '')}]"]
 620 |                 
 621 |                 # Check for pending input (shouldn't happen with allow_stdin=False)
 622 |                 if "input_request" in result:
 623 |                     logger.warning("Unexpected input request during execution")
 624 |                     return ["[ERROR: Unexpected input request]"]
 625 |                 
 626 |                 # Extract outputs
 627 |                 outputs = result.get("outputs", [])
 628 |                 
 629 |                 # Parse JSON string if needed (ExecutionStack returns JSON string)
 630 |                 if isinstance(outputs, str):
 631 |                     import json
 632 |                     try:
 633 |                         outputs = json.loads(outputs)
 634 |                     except json.JSONDecodeError:
 635 |                         logger.error(f"Failed to parse outputs JSON: {outputs}")
 636 |                         return [f"[ERROR: Invalid output format]"]
 637 |                 
 638 |                 if outputs:
 639 |                     formatted = safe_extract_outputs(outputs)
 640 |                     logger.info(f"Execution completed with {len(formatted)} formatted outputs: {formatted}")
 641 |                     return formatted
 642 |                 else:
 643 |                     logger.info("Execution completed with no outputs")
 644 |                     return ["[No output generated]"]
 645 |             
 646 |             # Still pending, wait before next poll
 647 |             await asyncio.sleep(poll_interval)
 648 |             
 649 |     except Exception as e:
 650 |         logger.error(f"Error executing via ExecutionStack: {e}", exc_info=True)
 651 |         return [f"[ERROR: {str(e)}]"]
 652 | 
 653 | 
async def execute_code_local(
    serverapp,
    notebook_path: str,
    code: str,
    kernel_id: str,
    timeout: int = 300,
    logger=None
) -> list[Union[str, ImageContent]]:
    """Execute code in a kernel and return outputs (JUPYTER_SERVER mode).
    
    This is a centralized code execution function for JUPYTER_SERVER mode that:
    1. Gets the kernel from kernel_manager
    2. Creates a client and sends execute_request
    3. Polls for response messages with timeout
    4. Collects and formats outputs
    5. Cleans up resources
    
    Args:
        serverapp: Jupyter ServerApp instance
        notebook_path: Path to the notebook (for context)
        code: Code to execute
        kernel_id: ID of the kernel to execute in
        timeout: Timeout in seconds (default: 300)
        logger: Logger instance (optional)
        
    Returns:
        List of formatted outputs (strings or ImageContent). Timeouts and
        exceptions are converted into a single-element list containing a
        bracketed error string; this function does not raise.
    """
    import asyncio
    import zmq.asyncio
    from inspect import isawaitable
    
    if logger is None:
        import logging
        logger = logging.getLogger(__name__)
    
    try:
        # Get kernel manager
        kernel_manager = serverapp.kernel_manager
        
        # Get the kernel using pinned_superclass pattern (like KernelUsageHandler)
        # NOTE(review): calls get_kernel unbound on pinned_superclass —
        # presumably to bypass subclass overrides; confirm against
        # jupyter_server's MultiKernelManager before changing.
        lkm = kernel_manager.pinned_superclass.get_kernel(kernel_manager, kernel_id)
        session = lkm.session
        client = lkm.client()
        
        # Ensure channels are started (critical for receiving IOPub messages!)
        if not client.channels_running:
            client.start_channels()
            # Wait for channels to be ready
            await asyncio.sleep(0.1)
        
        # Send execute request on shell channel
        shell_channel = client.shell_channel
        # NOTE: msg_id is the FULL message dict built by Session.msg(), not
        # just an id string; the actual id is msg_id['header']['msg_id'].
        msg_id = session.msg("execute_request", {
            "code": code,
            "silent": False,
            "store_history": True,
            "user_expressions": {},
            "allow_stdin": False,
            "stop_on_error": False
        })
        shell_channel.send(msg_id)
        
        # Give a moment for messages to start flowing
        await asyncio.sleep(0.01)
        
        # Prepare to collect outputs
        outputs = []
        execution_done = False
        grace_period_ms = 100  # Wait 100ms after shell reply for remaining IOPub messages
        execution_done_time = None
        
        # Poll for messages with timeout. Both the IOPub socket (outputs) and
        # the shell socket (execute_reply) are watched by one poller.
        poller = zmq.asyncio.Poller()
        iopub_socket = client.iopub_channel.socket
        shell_socket = shell_channel.socket
        poller.register(iopub_socket, zmq.POLLIN)
        poller.register(shell_socket, zmq.POLLIN)
        
        timeout_ms = timeout * 1000
        start_time = asyncio.get_event_loop().time()
        
        # Loop until the shell reply has arrived AND the grace period after it
        # has elapsed, so late IOPub output messages are still collected.
        while not execution_done or (execution_done_time and (asyncio.get_event_loop().time() - execution_done_time) * 1000 < grace_period_ms):
            elapsed_ms = (asyncio.get_event_loop().time() - start_time) * 1000
            remaining_ms = max(0, timeout_ms - elapsed_ms)
            
            # If execution is done and grace period expired, exit
            if execution_done and execution_done_time and (asyncio.get_event_loop().time() - execution_done_time) * 1000 >= grace_period_ms:
                break
            
            if remaining_ms <= 0:
                client.stop_channels()
                logger.warning(f"Code execution timeout after {timeout}s, collected {len(outputs)} outputs")
                return [f"[TIMEOUT ERROR: Code execution exceeded {timeout} seconds]"]
            
            # Use shorter poll timeout during grace period
            poll_timeout = min(remaining_ms, grace_period_ms / 2) if execution_done else remaining_ms
            events = dict(await poller.poll(poll_timeout))
            
            if not events:
                continue  # No messages, continue polling
            
            # IMPORTANT: Process IOPub messages BEFORE shell to collect outputs before marking done
            # Check for IOPub messages (outputs)
            if iopub_socket in events:
                msg = client.iopub_channel.get_msg(timeout=0)
                # Handle async get_msg (like KernelUsageHandler)
                if isawaitable(msg):
                    msg = await msg
                
                # Only consider messages that are replies to OUR request;
                # other kernel traffic on IOPub is ignored.
                if msg and msg.get('parent_header', {}).get('msg_id') == msg_id['header']['msg_id']:
                    msg_type = msg.get('msg_type')
                    content = msg.get('content', {})
                    
                    logger.debug(f"IOPub message: {msg_type}")
                    
                    # Collect output messages as nbformat-shaped dicts.
                    if msg_type == 'stream':
                        outputs.append({
                            'output_type': 'stream',
                            'name': content.get('name', 'stdout'),
                            'text': content.get('text', '')
                        })
                        logger.debug(f"Collected stream output: {len(content.get('text', ''))} chars")
                    elif msg_type == 'execute_result':
                        outputs.append({
                            'output_type': 'execute_result',
                            'data': content.get('data', {}),
                            'metadata': content.get('metadata', {}),
                            'execution_count': content.get('execution_count')
                        })
                        logger.debug(f"Collected execute_result, count: {content.get('execution_count')}")
                    elif msg_type == 'display_data':
                        # Note: 'transient' field from kernel messages is NOT part of nbformat schema
                        # Only include 'output_type', 'data', and 'metadata' fields
                        outputs.append({
                            'output_type': 'display_data',
                            'data': content.get('data', {}),
                            'metadata': content.get('metadata', {})
                        })
                        logger.debug("Collected display_data")
                    elif msg_type == 'error':
                        outputs.append({
                            'output_type': 'error',
                            'ename': content.get('ename', ''),
                            'evalue': content.get('evalue', ''),
                            'traceback': content.get('traceback', [])
                        })
                        logger.debug(f"Collected error: {content.get('ename')}")
            
            # Check for shell reply (execution complete) - AFTER processing IOPub
            if shell_socket in events:
                reply = client.shell_channel.get_msg(timeout=0)
                # Handle async get_msg (like KernelUsageHandler)
                if isawaitable(reply):
                    reply = await reply
                
                if reply and reply.get('parent_header', {}).get('msg_id') == msg_id['header']['msg_id']:
                    logger.debug(f"Execution complete, reply status: {reply.get('content', {}).get('status')}")
                    # Start the grace-period countdown instead of exiting
                    # immediately, so trailing IOPub messages are not lost.
                    execution_done = True
                    execution_done_time = asyncio.get_event_loop().time()
        
        # Clean up
        client.stop_channels()
        
        # Extract and format outputs
        if outputs:
            result = safe_extract_outputs(outputs)
            logger.info(f"Code execution completed with {len(result)} outputs")
            return result
        else:
            return ["[No output generated]"]
            
    except Exception as e:
        logger.error(f"Error executing code locally: {e}")
        return [f"[ERROR: {str(e)}]"]
 830 | 
 831 | 
async def execute_cell_local(
    serverapp,
    notebook_path: str,
    cell_index: int,
    kernel_id: str,
    timeout: int = 300,
    logger=None
) -> list[Union[str, ImageContent]]:
    """Execute a cell in a notebook and return outputs (JUPYTER_SERVER mode).
    
    This function:
    1. Reads the cell source from the notebook (YDoc or file)
    2. Executes the code using execute_code_local
    3. Writes the outputs back to the notebook (YDoc or file)
    4. Returns the formatted outputs
    
    Args:
        serverapp: Jupyter ServerApp instance
        notebook_path: Path to the notebook
        cell_index: Index of the cell to execute
        kernel_id: ID of the kernel to execute in
        timeout: Timeout in seconds (default: 300)
        logger: Logger instance (optional)
        
    Returns:
        List of formatted outputs (strings or ImageContent). Errors are
        reported as a single bracketed "[ERROR: ...]" string; this function
        does not raise.
    """
    import nbformat
    
    if logger is None:
        import logging
        logger = logging.getLogger(__name__)
    
    try:
        # Try to get YDoc first (for collaborative editing)
        file_id_manager = serverapp.web_app.settings.get("file_id_manager")
        ydoc = None
        
        if file_id_manager:
            file_id = file_id_manager.get_id(notebook_path)
            yroom_manager = serverapp.web_app.settings.get("yroom_manager")
            
            if yroom_manager:
                # Room ids follow the "json:notebook:<file_id>" convention.
                room_id = f"json:notebook:{file_id}"
                if yroom_manager.has_room(room_id):
                    try:
                        yroom = yroom_manager.get_room(room_id)
                        ydoc = await yroom.get_jupyter_ydoc()
                        logger.info(f"Using YDoc for cell {cell_index} execution")
                    except Exception as e:
                        # Fall back to the file-based path below.
                        logger.debug(f"Could not get YDoc: {e}")
        
        # Execute using YDoc or file
        if ydoc:
            # YDoc path - read from collaborative document
            if cell_index < 0 or cell_index >= len(ydoc.ycells):
                raise ValueError(f"Cell index {cell_index} out of range. Notebook has {len(ydoc.ycells)} cells.")
            
            cell = ydoc.ycells[cell_index]
            
            # Only execute code cells
            cell_type = cell.get("cell_type", "")
            if cell_type != "code":
                return [f"[Cell {cell_index} is not a code cell (type: {cell_type})]"]
            
            # YDoc source may be a list of lines or a single string.
            source_raw = cell.get("source", "")
            if isinstance(source_raw, list):
                source = "".join(source_raw)
            else:
                source = str(source_raw)
            
            if not source:
                return ["[Cell is empty]"]
            
            logger.info(f"Cell {cell_index} source from YDoc: {source[:100]}...")
            
            # Execute the code
            outputs = await execute_code_local(
                serverapp=serverapp,
                notebook_path=notebook_path,
                code=source,
                kernel_id=kernel_id,
                timeout=timeout,
                logger=logger
            )
            
            logger.info(f"Execution completed with {len(outputs)} outputs: {outputs}")
            
            # Update execution count in YDoc: one past the highest count
            # currently present among the notebook's code cells.
            max_count = 0
            for c in ydoc.ycells:
                if c.get("cell_type") == "code" and c.get("execution_count"):
                    max_count = max(max_count, c["execution_count"])
            
            cell["execution_count"] = max_count + 1
            
            # Update outputs in YDoc (simplified - just store formatted strings)
            # YDoc outputs should match nbformat structure
            # NOTE(review): non-string outputs (e.g. ImageContent) are
            # silently dropped here, unlike the file path below — confirm
            # whether image outputs should be persisted to the YDoc too.
            cell["outputs"] = []
            for output in outputs:
                if isinstance(output, str):
                    cell["outputs"].append({
                        "output_type": "stream",
                        "name": "stdout",
                        "text": output
                    })
            
            # YDoc path ends here; the code after this if/else only runs for
            # the file-based path.
            return outputs
        else:
            # File path - original logic
            # Read notebook as version 4 (latest) for consistency
            with open(notebook_path, 'r', encoding='utf-8') as f:
                notebook = nbformat.read(f, as_version=4)
            
            # Clean transient fields from outputs
            _clean_notebook_outputs(notebook)
            
            # Validate cell index
            if cell_index < 0 or cell_index >= len(notebook.cells):
                raise ValueError(f"Cell index {cell_index} out of range. Notebook has {len(notebook.cells)} cells.")
        
        # NOTE(review): from here on we are back at try-level indentation, but
        # this only executes on the file path — the YDoc branch returned above.
        cell = notebook.cells[cell_index]
        
        # Only execute code cells
        if cell.cell_type != 'code':
            return [f"[Cell {cell_index} is not a code cell (type: {cell.cell_type})]"]
        
        # Get cell source
        source = cell.source
        if not source:
            return ["[Cell is empty]"]
        
        # Execute the code
        logger.info(f"Executing cell {cell_index} from {notebook_path}")
        outputs = await execute_code_local(
            serverapp=serverapp,
            notebook_path=notebook_path,
            code=source,
            kernel_id=kernel_id,
            timeout=timeout,
            logger=logger
        )
        
        # Write outputs back to notebook (update execution_count and outputs)
        # Get the last execution count
        max_count = 0
        for c in notebook.cells:
            if c.cell_type == 'code' and c.execution_count:
                max_count = max(max_count, c.execution_count)
        
        cell.execution_count = max_count + 1
        
        # Convert formatted outputs back to nbformat structure
        # Note: outputs is already formatted, so we need to reconstruct
        # For simplicity, we'll store a simple representation
        cell.outputs = []
        for output in outputs:
            if isinstance(output, str):
                # Create a stream output
                cell.outputs.append(nbformat.v4.new_output(
                    output_type='stream',
                    name='stdout',
                    text=output
                ))
            elif isinstance(output, ImageContent):
                # Create a display_data output with image
                cell.outputs.append(nbformat.v4.new_output(
                    output_type='display_data',
                    data={'image/png': output.data}
                ))
        
        # Write notebook back
        with open(notebook_path, 'w', encoding='utf-8') as f:
            nbformat.write(notebook, f)
        
        logger.info(f"Cell {cell_index} executed and notebook updated")
        return outputs
        
    except Exception as e:
        logger.error(f"Error executing cell locally: {e}")
        return [f"[ERROR: {str(e)}]"]
```
Page 4/6FirstPrevNextLast