This is page 2 of 6. Use http://codebase.md/mikechambers/adb-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .gitattributes
├── .gitignore
├── adb-proxy-socket
│ ├── .gitignore
│ ├── package-lock.json
│ ├── package.json
│ ├── proxy.js
│ └── README.md
├── cep
│ ├── com.mikechambers.ae
│ │ ├── .debug
│ │ ├── commands.js
│ │ ├── CSXS
│ │ │ └── manifest.xml
│ │ ├── index.html
│ │ ├── jsx
│ │ │ └── json-polyfill.jsx
│ │ ├── lib
│ │ │ └── CSInterface.js
│ │ ├── main.js
│ │ └── style.css
│ └── com.mikechambers.ai
│ ├── .debug
│ ├── commands.js
│ ├── CSXS
│ │ └── manifest.xml
│ ├── index.html
│ ├── jsx
│ │ ├── json-polyfill.jsx
│ │ └── utils.jsx
│ ├── lib
│ │ └── CSInterface.js
│ ├── main.js
│ └── style.css
├── dxt
│ ├── build
│ ├── pr
│ │ └── manifest.json
│ └── ps
│ └── manifest.json
├── images
│ └── claud-attach-mcp.png
├── LICENSE.md
├── mcp
│ ├── .gitignore
│ ├── ae-mcp.py
│ ├── ai-mcp.py
│ ├── core.py
│ ├── fonts.py
│ ├── id-mcp.py
│ ├── logger.py
│ ├── pr-mcp.py
│ ├── ps-batch-play.py
│ ├── ps-mcp.py
│ ├── pyproject.toml
│ ├── requirements.txt
│ ├── socket_client.py
│ └── uv.lock
├── package-lock.json
├── README.md
└── uxp
├── id
│ ├── commands
│ │ └── index.js
│ ├── icons
│ │ ├── [email protected]
│ │ ├── [email protected]
│ │ ├── [email protected]
│ │ └── [email protected]
│ ├── index.html
│ ├── LICENSE
│ ├── main.js
│ ├── manifest.json
│ ├── package.json
│ ├── socket.io.js
│ └── style.css
├── pr
│ ├── commands
│ │ ├── consts.js
│ │ ├── core.js
│ │ ├── index.js
│ │ └── utils.js
│ ├── icons
│ │ ├── [email protected]
│ │ ├── [email protected]
│ │ ├── [email protected]
│ │ └── [email protected]
│ ├── index.html
│ ├── LICENSE
│ ├── main.js
│ ├── manifest.json
│ ├── package.json
│ ├── socket.io.js
│ └── style.css
└── ps
├── commands
│ ├── adjustment_layers.js
│ ├── core.js
│ ├── filters.js
│ ├── index.js
│ ├── layer_styles.js
│ ├── layers.js
│ ├── selection.js
│ └── utils.js
├── icons
│ ├── [email protected]
│ ├── [email protected]
│ ├── [email protected]
│ └── [email protected]
├── index.html
├── LICENSE
├── main.js
├── manifest.json
├── package.json
├── socket.io.js
└── style.css
```
# Files
--------------------------------------------------------------------------------
/mcp/ai-mcp.py:
--------------------------------------------------------------------------------
```python
1 | # MIT License
2 | #
3 | # Copyright (c) 2025 Mike Chambers
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | from mcp.server.fastmcp import FastMCP
24 | from core import init, sendCommand, createCommand
25 | import socket_client
26 | import sys
27 |
28 | # Create an MCP server
29 | mcp_name = "Adobe Illustrator MCP Server"
30 | mcp = FastMCP(mcp_name, log_level="ERROR")
31 | print(f"{mcp_name} running on stdio", file=sys.stderr)
32 |
33 | APPLICATION = "illustrator"
34 | PROXY_URL = 'http://localhost:3001'
35 | PROXY_TIMEOUT = 20
36 |
37 | socket_client.configure(
38 | app=APPLICATION,
39 | url=PROXY_URL,
40 | timeout=PROXY_TIMEOUT
41 | )
42 |
43 | init(APPLICATION, socket_client)
44 |
45 | @mcp.tool()
46 | def get_documents():
47 | """
48 | Returns information about all currently open documents in Illustrator.
49 |
50 | """
51 | command = createCommand("getDocuments", {})
52 | return sendCommand(command)
53 |
54 | @mcp.tool()
55 | def get_active_document_info():
56 | """
57 | Returns information about the current active document.
58 |
59 | """
60 | command = createCommand("getActiveDocumentInfo", {})
61 | return sendCommand(command)
62 |
63 | @mcp.tool()
64 | def open_file(
65 | path: str
66 | ):
67 | """
68 | Opens an Illustrator (.ai) file in Adobe Illustrator.
69 |
70 | Args:
71 | path (str): The absolute file path to the Illustrator file to open.
72 | Example: "/Users/username/Documents/my_artwork.ai"
73 |
74 | Returns:
75 | dict: Result containing:
76 | - success (bool): Whether the file was opened successfully
77 | - error (str): Error message if opening failed
78 |
79 | """
80 |
81 | command_params = {
82 | "path": path
83 | }
84 |
85 | command = createCommand("openFile", command_params)
86 | return sendCommand(command)
87 |
88 | @mcp.tool()
89 | def export_png(
90 | path: str,
91 | transparency: bool = True,
92 | anti_aliasing: bool = True,
93 | artboard_clipping: bool = True,
94 | horizontal_scale: int = 100,
95 | vertical_scale: int = 100,
96 | export_type: str = "PNG24",
97 | matte: bool = None,
98 | matte_color: dict = {"red": 255, "green": 255, "blue": 255}
99 | ):
100 | """
101 | Exports the active Illustrator document as a PNG file.
102 |
103 | Args:
104 | path (str): The absolute file path where the PNG will be saved.
105 | Example: "/Users/username/Documents/my_export.png"
106 | transparency (bool, optional): Enable/disable transparency. Defaults to True.
107 | anti_aliasing (bool, optional): Enable/disable anti-aliasing for smooth edges. Defaults to True.
108 | artboard_clipping (bool, optional): Clip export to artboard bounds. Defaults to True.
109 | horizontal_scale (int, optional): Horizontal scale percentage (1-1000). Defaults to 100.
110 | vertical_scale (int, optional): Vertical scale percentage (1-1000). Defaults to 100.
111 | export_type (str, optional): PNG format type. "PNG24" (24-bit) or "PNG8" (8-bit). Defaults to "PNG24".
112 | matte (bool, optional): Enable matte background color for transparency preview.
113 | If None, uses Illustrator's default behavior.
114 | matte_color (dict, optional): RGB color for matte background. Defaults to {"red": 255, "green": 255, "blue": 255}.
115 | Dict with keys "red", "green", "blue" with values 0-255.
116 |
117 | Returns:
118 | dict: Export result containing:
119 | - success (bool): Whether the export succeeded
120 | - filePath (str): The actual file path where the PNG was saved
121 | - fileExists (bool): Whether the exported file exists
122 | - options (dict): The export options that were used
123 | - documentName (str): Name of the exported document
124 | - error (str): Error message if export failed
125 |
126 | Example:
127 | # Basic PNG export
128 | result = export_png("/Users/username/Desktop/my_artwork.png")
129 |
130 | # High-resolution export with transparency
131 | result = export_png(
132 | path="/Users/username/Desktop/high_res.png",
133 | horizontal_scale=300,
134 | vertical_scale=300,
135 | transparency=True
136 | )
137 |
138 | # PNG8 export with red matte background
139 | result = export_png(
140 | path="/Users/username/Desktop/small_file.png",
141 | export_type="PNG8",
142 | matte=True,
143 | matte_color={"red": 255, "green": 0, "blue": 0}
144 | )
145 |
146 | # Blue matte background
147 | result = export_png(
148 | path="/Users/username/Desktop/blue_bg.png",
149 | matte=True,
150 | matte_color={"red": 0, "green": 100, "blue": 255}
151 | )
152 | """
153 |
154 |
155 | # Only include matte and matteColor if needed
156 | command_params = {
157 | "path": path,
158 | "transparency": transparency,
159 | "antiAliasing": anti_aliasing,
160 | "artBoardClipping": artboard_clipping,
161 | "horizontalScale": horizontal_scale,
162 | "verticalScale": vertical_scale,
163 | "exportType": export_type
164 | }
165 |
166 | # Only include matte if explicitly set
167 | if matte is not None:
168 | command_params["matte"] = matte
169 |
170 | # Include matte color if matte is enabled or custom colors provided
171 | if matte or matte_color != {"red": 255, "green": 255, "blue": 255}:
172 | command_params["matteColor"] = matte_color
173 |
174 | command = createCommand("exportPNG", command_params)
175 | return sendCommand(command)
176 |
177 |
178 |
179 | @mcp.tool()
180 | def execute_extend_script(script_string: str):
181 | """
182 | Executes arbitrary ExtendScript code in Illustrator and returns the result.
183 |
184 | The script should use 'return' to send data back. The result will be automatically
185 | JSON stringified. If the script throws an error, it will be caught and returned
186 | as an error object.
187 |
188 | Args:
189 | script_string (str): The ExtendScript code to execute. Must use 'return' to
190 | send results back.
191 |
192 | Returns:
193 | any: The result returned from the ExtendScript, or an error object containing:
194 | - error (str): Error message
195 | - line (str): Line number where error occurred
196 |
197 | Example:
198 | script = '''
199 |         var doc = app.activeDocument;
200 |         return {
201 |             name: doc.name,
202 |             layers: doc.layers.length
203 | };
204 | '''
205 | result = execute_extend_script(script)
206 | """
207 | command = createCommand("executeExtendScript", {
208 | "scriptString": script_string
209 | })
210 | return sendCommand(command)
211 |
212 | @mcp.resource("config://get_instructions")
213 | def get_instructions() -> str:
214 | """Read this first! Returns information and instructions on how to use Illustrator and this API"""
215 |
216 | return f"""
217 | You are an Illustrator expert who is creative and loves to help other people learn to use Illustrator.
218 |
219 | Rules to follow:
220 |
221 | 1. Think deeply about how to solve the task
222 | 2. Always check your work before responding
223 | 3. Read the info for the API calls to make sure you understand the requirements and arguments
224 |
225 | """
226 |
227 |
228 | # Illustrator Blend Modes (for future use)
229 | BLEND_MODES = [
230 | "ADD",
231 | "ALPHA_ADD",
232 | "CLASSIC_COLOR_BURN",
233 | "CLASSIC_COLOR_DODGE",
234 | "CLASSIC_DIFFERENCE",
235 | "COLOR",
236 | "COLOR_BURN",
237 | "COLOR_DODGE",
238 | "DANCING_DISSOLVE",
239 | "DARKEN",
240 | "DARKER_COLOR",
241 | "DIFFERENCE",
242 | "DISSOLVE",
243 | "EXCLUSION",
244 | "HARD_LIGHT",
245 | "HARD_MIX",
246 | "HUE",
247 | "LIGHTEN",
248 | "LIGHTER_COLOR",
249 | "LINEAR_BURN",
250 | "LINEAR_DODGE",
251 | "LINEAR_LIGHT",
252 | "LUMINESCENT_PREMUL",
253 | "LUMINOSITY",
254 | "MULTIPLY",
255 | "NORMAL",
256 | "OVERLAY",
257 | "PIN_LIGHT",
258 | "SATURATION",
259 | "SCREEN",
260 | "SILHOUETE_ALPHA",
261 | "SILHOUETTE_LUMA",
262 | "SOFT_LIGHT",
263 | "STENCIL_ALPHA",
264 | "STENCIL_LUMA",
265 | "SUBTRACT",
266 | "VIVID_LIGHT"
267 | ]
```
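Note: each tool above wraps its arguments with `createCommand(action, params)` and forwards the result through `sendCommand()` to the proxy, which relays it to the Illustrator CEP panel. A minimal sketch of that round trip follows; the `{ action, options }` envelope is an assumption inferred from how `parseAndRouteCommand()` in `cep/com.mikechambers.ai/commands.js` reads `command.action` and `command.options` (the actual packet is assembled in `core.py`, which is not on this page), and the handler stub only stands in for the real panel code so the example runs outside Illustrator.

```javascript
// Sketch only: option keys mirror the command_params built by export_png()
// above; the { action, options } envelope is an assumption based on how the
// CEP handlers read command.action / command.options.
const exportCommand = {
    action: "exportPNG",
    options: {
        path: "/Users/username/Desktop/my_artwork.png",
        transparency: true,
        antiAliasing: true,
        artBoardClipping: true,
        horizontalScale: 100,
        verticalScale: 100,
        exportType: "PNG24"
        // matte / matteColor are only included when explicitly requested
    }
};

// Stand-in for the panel's commandHandlers map so the sketch runs in Node;
// the real exportPNG handler lives in cep/com.mikechambers.ai/commands.js.
const commandHandlers = {
    exportPNG: async (command) => ({ received: command.options.path })
};

const routeCommand = async (command) => {
    const handler = commandHandlers[command.action];
    if (typeof handler !== "function") {
        throw new Error(`Unknown Command: ${command.action}`);
    }
    return await handler(command);
};

routeCommand(exportCommand).then((result) => console.log(result));
```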
--------------------------------------------------------------------------------
/uxp/ps/commands/utils.js:
--------------------------------------------------------------------------------
```javascript
1 | /* MIT License
2 | *
3 | * Copyright (c) 2025 Mike Chambers
4 | *
5 | * Permission is hereby granted, free of charge, to any person obtaining a copy
6 | * of this software and associated documentation files (the "Software"), to deal
7 | * in the Software without restriction, including without limitation the rights
8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | * copies of the Software, and to permit persons to whom the Software is
10 | * furnished to do so, subject to the following conditions:
11 | *
12 | * The above copyright notice and this permission notice shall be included in all
13 | * copies or substantial portions of the Software.
14 | *
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | * SOFTWARE.
22 | */
23 |
24 | const { app, constants, core } = require("photoshop");
25 | const fs = require("uxp").storage.localFileSystem;
26 | const openfs = require('fs')
27 |
28 |
29 | const convertFontSize = (fontSize) => {
30 | return (app.activeDocument.resolution / 72) * fontSize
31 | }
32 |
33 | const convertFromPhotoshopFontSize = (photoshopFontSize) => {
34 | return photoshopFontSize / (app.activeDocument.resolution / 72);
35 | }
36 |
37 | const createFile = async (filePath) => {
38 | let url = `file:${filePath}`
39 | const fd = await openfs.open(url, "a+");
40 | await openfs.close(fd)
41 |
42 | return url
43 | }
44 |
45 | const parseColor = (color) => {
46 | try {
47 | const c = new app.SolidColor();
48 | c.rgb.red = color.red;
49 | c.rgb.green = color.green;
50 | c.rgb.blue = color.blue;
51 |
52 | return c;
53 | } catch (e) {
54 | throw new Error(`Invalid color values: ${JSON.stringify(color)}`);
55 | }
56 | };
57 |
58 | const getAlignmentMode = (mode) => {
59 | switch (mode) {
60 | case "LEFT":
61 | return "ADSLefts";
62 | case "CENTER_HORIZONTAL":
63 | return "ADSCentersH";
64 | case "RIGHT":
65 | return "ADSRights";
66 | case "TOP":
67 | return "ADSTops";
68 | case "CENTER_VERTICAL":
69 | return "ADSCentersV";
70 | case "BOTTOM":
71 | return "ADSBottoms";
72 | default:
73 | throw new Error(
74 | `getAlignmentMode : Unknown alignment mode : ${mode}`
75 | );
76 | }
77 | };
78 |
79 | const getJustificationMode = (value) => {
80 | return getConstantValue(constants.Justification, value, "Justification");
81 | };
82 |
83 | const getBlendMode = (value) => {
84 | return getConstantValue(constants.BlendMode, value, "BlendMode");
85 | };
86 |
87 | const getInterpolationMethod = (value) => {
88 | return getConstantValue(
89 | constants.InterpolationMethod,
90 | value,
91 | "InterpolationMethod"
92 | );
93 | };
94 |
95 | const getAnchorPosition = (value) => {
96 | return getConstantValue(constants.AnchorPosition, value, "AnchorPosition");
97 | };
98 |
99 | const getNewDocumentMode = (value) => {
100 | return getConstantValue(
101 | constants.NewDocumentMode,
102 | value,
103 | "NewDocumentMode"
104 | );
105 | };
106 |
107 | const getConstantValue = (c, v, n) => {
108 | let out = c[v.toUpperCase()];
109 |
110 | if (!out) {
111 | throw new Error(`getConstantValue : Unknown constant value :${c} ${v}`);
112 | }
113 |
114 | return out;
115 | };
116 |
117 | const selectLayer = (layer, exclusive = false) => {
118 | if (exclusive) {
119 | clearLayerSelections();
120 | }
121 |
122 | layer.selected = true;
123 | };
124 |
125 | const clearLayerSelections = (layers) => {
126 | if (!layers) {
127 | layers = app.activeDocument.layers;
128 | }
129 |
130 | for (const layer of layers) {
131 | layer.selected = false;
132 |
133 | if (layer.layers && layer.layers.length > 0) {
134 | clearLayerSelections(layer.layers);
135 | }
136 | }
137 | };
138 |
139 | const setVisibleAllLayers = (visible, layers) => {
140 | if (!layers) {
141 | layers = app.activeDocument.layers;
142 | }
143 |
144 | for (const layer of layers) {
145 | layer.visible = visible
146 |
147 | if (layer.layers && layer.layers.length > 0) {
148 | setVisibleAllLayers(visible, layer.layers)
149 | }
150 | }
151 | };
152 |
153 |
154 | const findLayer = (id, layers) => {
155 | if (!layers) {
156 | layers = app.activeDocument.layers;
157 | }
158 |
159 | for (const layer of layers) {
160 | if (layer.id === id) {
161 | return layer;
162 | }
163 |
164 | if (layer.layers && layer.layers.length > 0) {
165 | const found = findLayer(id, layer.layers);
166 | if (found) {
167 | return found; // Stop as soon as we’ve found the target layer
168 | }
169 | }
170 | }
171 |
172 | return null;
173 | };
174 |
175 |
176 | const findLayerByName = (name, layers) => {
177 | if (!layers) {
178 | layers = app.activeDocument.layers;
179 | }
180 |
181 | return app.activeDocument.layers.getByName(name);
182 | };
183 |
184 | const _saveDocumentAs = async (filePath, fileType) => {
185 |
186 | let url = await createFile(filePath)
187 |
188 | let saveFile = await fs.getEntryWithUrl(url);
189 |
190 | return await execute(async () => {
191 |
192 | fileType = fileType.toUpperCase()
193 | if (fileType == "JPG") {
194 | await app.activeDocument.saveAs.jpg(saveFile, {
195 | quality:9
196 | }, true)
197 | } else if (fileType == "PNG") {
198 | await app.activeDocument.saveAs.png(saveFile, {
199 | }, true)
200 | } else {
201 | await app.activeDocument.saveAs.psd(saveFile, {
202 | alphaChannels:true,
203 | annotations:true,
204 | embedColorProfile:true,
205 | layers:true,
206 | maximizeCompatibility:true,
207 | spotColor:true,
208 | }, true)
209 | }
210 |
211 | return {savedFilePath:saveFile.nativePath}
212 | });
213 | };
214 |
215 | const execute = async (callback, commandName = "Executing command...") => {
216 | try {
217 | return await core.executeAsModal(callback, {
218 | commandName: commandName,
219 | });
220 | } catch (e) {
221 | throw new Error(`Error executing command [modal] : ${e}`);
222 | }
223 | };
224 |
225 | const tokenify = async (url) => {
226 | let out = await fs.createSessionToken(
227 | await fs.getEntryWithUrl("file:" + url)
228 | );
229 | return out;
230 | };
231 |
232 | const getElementPlacement = (placement) => {
233 | return constants.ElementPlacement[placement.toUpperCase()];
234 | };
235 |
236 | const hasActiveSelection = () => {
237 | return app.activeDocument.selection.bounds != null;
238 | };
239 |
240 | const getMostRecentlyModifiedFile = async (directoryPath) => {
241 | try {
242 | // Get directory contents
243 | const dirEntries = await openfs.readdir(directoryPath);
244 |
245 | const fileDetails = [];
246 |
247 | // Process each file
248 | let i = 0
249 | for (const entry of dirEntries) {
250 | console.log(i++)
251 | const filePath = window.path.join(directoryPath, entry);
252 |
253 | // Get file stats using lstat
254 | try {
255 | const stats = await openfs.lstat(filePath);
256 |
257 | // Skip if it's a directory
258 | if (stats.isDirectory()) {
259 | continue;
260 | }
261 |
262 | fileDetails.push({
263 | name: entry,
264 | path: filePath,
265 | modifiedTime: stats.mtime, // Date object
266 | modifiedTimestamp: stats.mtimeMs // Use mtimeMs directly instead of getTime()
267 | });
268 | } catch (err) {
269 | console.log(`Error getting stats for ${filePath}:`, err);
270 | // Continue to next file if there's an error with this one
271 | continue;
272 | }
273 | }
274 |
275 | if (fileDetails.length === 0) {
276 | return null;
277 | }
278 |
279 | // Sort by modification timestamp (newest first)
280 | fileDetails.sort((a, b) => b.modifiedTimestamp - a.modifiedTimestamp);
281 |
282 | // Return the most recently modified file
283 | return fileDetails[0];
284 | } catch (err) {
285 | console.error('Error getting most recently modified file:', err);
286 | return null;
287 | }
288 | }
289 |
290 | const fileExists = async (filePath) => {
291 | try {
292 | await openfs.lstat(`file:${filePath}`);
293 | return true;
294 | } catch (error) {
295 | return false;
296 | }
297 | }
298 |
299 | const generateDocumentInfo = (document, activeDocument) => {
300 | return {
301 | name:document.name,
302 | id:document.id,
303 | isActive: document === activeDocument,
304 | path:document.path,
305 | saved:document.saved,
306 | title:document.title
307 | };
308 | }
309 |
310 | const listOpenDocuments = () => {
311 | const docs = app.documents;
312 | const activeDocument = app.activeDocument
313 |
314 | let out = []
315 |
316 | for (let doc of docs) {
317 | let d = generateDocumentInfo(doc, activeDocument)
318 | out.push(d)
319 | }
320 |
321 | return out
322 | }
323 |
324 | module.exports = {
325 | findLayerByName,
326 | generateDocumentInfo,
327 | listOpenDocuments,
328 | convertFromPhotoshopFontSize,
329 | convertFontSize,
330 | setVisibleAllLayers,
331 | _saveDocumentAs,
332 | getMostRecentlyModifiedFile,
333 | fileExists,
334 | createFile,
335 | parseColor,
336 | getAlignmentMode,
337 | getJustificationMode,
338 | getBlendMode,
339 | getInterpolationMethod,
340 | getAnchorPosition,
341 | getNewDocumentMode,
342 | getConstantValue,
343 | selectLayer,
344 | clearLayerSelections,
345 | findLayer,
346 | execute,
347 | tokenify,
348 | getElementPlacement,
349 | hasActiveSelection
350 | }
```
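Note: every Photoshop command handler leans on the same small toolkit from this file: `findLayer()` resolves a `layerId`, `selectLayer(layer, true)` makes it the only selected layer, `execute()` wraps the work in `core.executeAsModal`, and lookups like `getBlendMode()` map upper-case strings onto the `photoshop` constants enums. A hypothetical handler built from those helpers is sketched below; the handler name and options are invented for illustration, and it assumes the UXP `photoshop` module, so it only runs inside a UXP panel.

```javascript
// Hypothetical handler (not in the repo) showing the common pattern:
// resolve the layer, enter a modal scope via execute(), then act on it.
const { findLayer, selectLayer, execute, getBlendMode } = require("./utils");

const setLayerBlendMode = async (command) => {
    const { layerId, blendMode } = command.options;

    const layer = findLayer(layerId);
    if (!layer) {
        throw new Error(`setLayerBlendMode : Could not find layerId : ${layerId}`);
    }

    // getBlendMode() upper-cases the string and looks it up on
    // constants.BlendMode, throwing if the value is unknown.
    const mode = getBlendMode(blendMode);

    return await execute(async () => {
        selectLayer(layer, true);   // exclusive selection
        layer.blendMode = mode;     // UXP DOM property on the Layer object
    }, "Set layer blend mode");
};
```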
--------------------------------------------------------------------------------
/uxp/ps/commands/selection.js:
--------------------------------------------------------------------------------
```javascript
1 | const { app, constants, action } = require("photoshop");
2 | const {
3 | findLayer,
4 | execute,
5 | parseColor,
6 | selectLayer
7 | } = require("./utils");
8 |
9 | const {hasActiveSelection} = require("./utils")
10 |
11 | const clearSelection = async () => {
12 | await app.activeDocument.selection.selectRectangle(
13 | { top: 0, left: 0, bottom: 0, right: 0 },
14 | constants.SelectionType.REPLACE,
15 | 0,
16 | true
17 | );
18 | };
19 |
20 | const createMaskFromSelection = async (command) => {
21 |
22 | let options = command.options;
23 | let layerId = options.layerId;
24 |
25 | let layer = findLayer(layerId);
26 |
27 | if (!layer) {
28 | throw new Error(
29 | `createMaskFromSelection : Could not find layerId : ${layerId}`
30 | );
31 | }
32 |
33 | await execute(async () => {
34 | selectLayer(layer, true);
35 |
36 | let commands = [
37 | {
38 | _obj: "make",
39 | at: {
40 | _enum: "channel",
41 | _ref: "channel",
42 | _value: "mask",
43 | },
44 | new: {
45 | _class: "channel",
46 | },
47 | using: {
48 | _enum: "userMaskEnabled",
49 | _value: "revealSelection",
50 | },
51 | },
52 | ];
53 |
54 | await action.batchPlay(commands, {});
55 | });
56 | };
57 |
58 | const selectSubject = async (command) => {
59 |
60 | let options = command.options;
61 | let layerId = options.layerId;
62 |
63 | let layer = findLayer(layerId);
64 |
65 | if (!layer) {
66 | throw new Error(
67 | `selectSubject : Could not find layerId : ${layerId}`
68 | );
69 | }
70 |
71 | return await execute(async () => {
72 | selectLayer(layer, true);
73 |
74 | let commands = [
75 | // Select Subject
76 | {
77 | _obj: "autoCutout",
78 | sampleAllLayers: false,
79 | },
80 | ];
81 |
82 | await action.batchPlay(commands, {});
83 | });
84 | };
85 |
86 | const selectSky = async (command) => {
87 |
88 | let options = command.options;
89 | let layerId = options.layerId;
90 |
91 | let layer = findLayer(layerId);
92 |
93 | if (!layer) {
94 | throw new Error(`selectSky : Could not find layerId : ${layerId}`);
95 | }
96 |
97 | return await execute(async () => {
98 | selectLayer(layer, true);
99 |
100 | let commands = [
101 | // Select Sky
102 | {
103 | _obj: "selectSky",
104 | sampleAllLayers: false,
105 | },
106 | ];
107 |
108 | await action.batchPlay(commands, {});
109 |
110 | });
111 | };
112 |
113 | const cutSelectionToClipboard = async (command) => {
114 |
115 | let options = command.options;
116 | let layerId = options.layerId;
117 |
118 | let layer = findLayer(layerId);
119 |
120 | if (!layer) {
121 | throw new Error(
122 | `cutSelectionToClipboard : Could not find layerId : ${layerId}`
123 | );
124 | }
125 |
126 | if (!hasActiveSelection()) {
127 | throw new Error(
128 | "cutSelectionToClipboard : Requires an active selection"
129 | );
130 | }
131 |
132 | return await execute(async () => {
133 | selectLayer(layer, true);
134 |
135 | let commands = [
136 | {
137 | _obj: "cut",
138 | },
139 | ];
140 |
141 | await action.batchPlay(commands, {});
142 | });
143 | };
144 |
145 | const copyMergedSelectionToClipboard = async (command) => {
146 |
147 | let options = command.options;
148 |
149 | if (!hasActiveSelection()) {
150 | throw new Error(
151 |             "copyMergedSelectionToClipboard : Requires an active selection"
152 | );
153 | }
154 |
155 | return await execute(async () => {
156 | let commands = [{
157 | _obj: "copyMerged",
158 | }];
159 |
160 | await action.batchPlay(commands, {});
161 | });
162 | };
163 |
164 | const copySelectionToClipboard = async (command) => {
165 |
166 | let options = command.options;
167 | let layerId = options.layerId;
168 |
169 | let layer = findLayer(layerId);
170 |
171 | if (!layer) {
172 | throw new Error(
173 | `copySelectionToClipboard : Could not find layerId : ${layerId}`
174 | );
175 | }
176 |
177 | if (!hasActiveSelection()) {
178 | throw new Error(
179 | "copySelectionToClipboard : Requires an active selection"
180 | );
181 | }
182 |
183 | return await execute(async () => {
184 | selectLayer(layer, true);
185 |
186 | let commands = [{
187 | _obj: "copyEvent",
188 | copyHint: "pixels",
189 | }];
190 |
191 | await action.batchPlay(commands, {});
192 | });
193 | };
194 |
195 | const pasteFromClipboard = async (command) => {
196 |
197 | let options = command.options;
198 | let layerId = options.layerId;
199 |
200 | let layer = findLayer(layerId);
201 |
202 | if (!layer) {
203 | throw new Error(
204 | `pasteFromClipboard : Could not find layerId : ${layerId}`
205 | );
206 | }
207 |
208 | return await execute(async () => {
209 | selectLayer(layer, true);
210 |
211 | let pasteInPlace = options.pasteInPlace;
212 |
213 | let commands = [
214 | {
215 | _obj: "paste",
216 | antiAlias: {
217 | _enum: "antiAliasType",
218 | _value: "antiAliasNone",
219 | },
220 | as: {
221 | _class: "pixel",
222 | },
223 | inPlace: pasteInPlace,
224 | },
225 | ];
226 |
227 | await action.batchPlay(commands, {});
228 | });
229 | };
230 |
231 | const deleteSelection = async (command) => {
232 |
233 | let options = command.options;
234 | let layerId = options.layerId;
235 | let layer = findLayer(layerId);
236 |
237 | if (!layer) {
238 | throw new Error(
239 | `deleteSelection : Could not find layerId : ${layerId}`
240 | );
241 | }
242 |
243 | if (!app.activeDocument.selection.bounds) {
244 |         throw new Error(`deleteSelection : Requires an active selection`);
245 | }
246 |
247 | await execute(async () => {
248 | selectLayer(layer, true);
249 | let commands = [
250 | {
251 | _obj: "delete",
252 | },
253 | ];
254 | await action.batchPlay(commands, {});
255 | });
256 | };
257 |
258 | const fillSelection = async (command) => {
259 |
260 | let options = command.options;
261 | let layerId = options.layerId;
262 | let layer = findLayer(layerId);
263 |
264 | if (!layer) {
265 | throw new Error(
266 | `fillSelection : Could not find layerId : ${layerId}`
267 | );
268 | }
269 |
270 | if (!app.activeDocument.selection.bounds) {
271 |         throw new Error(`fillSelection : Requires an active selection`);
272 | }
273 |
274 | await execute(async () => {
275 | selectLayer(layer, true);
276 |
277 | let c = parseColor(options.color).rgb;
278 | let commands = [
279 | // Fill
280 | {
281 | _obj: "fill",
282 | color: {
283 | _obj: "RGBColor",
284 | blue: c.blue,
285 | grain: c.green,
286 | red: c.red,
287 | },
288 | mode: {
289 | _enum: "blendMode",
290 | _value: options.blendMode.toLowerCase(),
291 | },
292 | opacity: {
293 | _unit: "percentUnit",
294 | _value: options.opacity,
295 | },
296 | using: {
297 | _enum: "fillContents",
298 | _value: "color",
299 | },
300 | },
301 | ];
302 | await action.batchPlay(commands, {});
303 | });
304 | };
305 |
306 | const selectPolygon = async (command) => {
307 |
308 | let options = command.options;
309 | let layerId = options.layerId;
310 | let layer = findLayer(layerId);
311 |
312 | if (!layer) {
313 | throw new Error(
314 | `selectPolygon : Could not find layerId : ${layerId}`
315 | );
316 | }
317 |
318 | await execute(async () => {
319 |
320 | selectLayer(layer, true);
321 |
322 | await app.activeDocument.selection.selectPolygon(
323 | options.points,
324 | constants.SelectionType.REPLACE,
325 | options.feather,
326 | options.antiAlias
327 | );
328 | });
329 | };
330 |
331 | let selectEllipse = async (command) => {
332 |
333 | let options = command.options;
334 | let layerId = options.layerId;
335 | let layer = findLayer(layerId);
336 |
337 | if (!layer) {
338 | throw new Error(
339 | `selectEllipse : Could not find layerId : ${layerId}`
340 | );
341 | }
342 |
343 | await execute(async () => {
344 |
345 | selectLayer(layer, true);
346 |
347 | await app.activeDocument.selection.selectEllipse(
348 | options.bounds,
349 | constants.SelectionType.REPLACE,
350 | options.feather,
351 | options.antiAlias
352 | );
353 | });
354 | };
355 |
356 | const selectRectangle = async (command) => {
357 | let options = command.options;
358 | let layerId = options.layerId;
359 | let layer = findLayer(layerId);
360 |
361 | if (!layer) {
362 | throw new Error(
363 | `selectRectangle : Could not find layerId : ${layerId}`
364 | );
365 | }
366 |
367 | await execute(async () => {
368 | selectLayer(layer, true);
369 |
370 | await app.activeDocument.selection.selectRectangle(
371 | options.bounds,
372 | constants.SelectionType.REPLACE,
373 | options.feather,
374 | options.antiAlias
375 | );
376 | });
377 | };
378 |
379 | const invertSelection = async (command) => {
380 |
381 | if (!app.activeDocument.selection.bounds) {
382 | throw new Error(`invertSelection : Requires an active selection`);
383 | }
384 |
385 | await execute(async () => {
386 | let commands = [
387 | {
388 | _obj: "inverse",
389 | },
390 | ];
391 | await action.batchPlay(commands, {});
392 | });
393 | };
394 |
395 | const commandHandlers = {
396 | clearSelection,
397 | createMaskFromSelection,
398 | selectSubject,
399 | selectSky,
400 | cutSelectionToClipboard,
401 | copyMergedSelectionToClipboard,
402 | copySelectionToClipboard,
403 | pasteFromClipboard,
404 | deleteSelection,
405 | fillSelection,
406 | selectPolygon,
407 | selectEllipse,
408 | selectRectangle,
409 | invertSelection
410 | };
411 |
412 | module.exports = {
413 | commandHandlers
414 | };
```
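Note: each selection handler reads the target `layerId` plus the geometry or fill settings it needs from `command.options`. Two hypothetical payloads (values invented) are sketched below, matching the fields `selectRectangle` and `fillSelection` read above.

```javascript
// Hypothetical payloads (values invented) for two of the handlers above.

// selectRectangle: bounds in pixels, feather in pixels, antiAlias boolean.
const selectRectangleCommand = {
    action: "selectRectangle",
    options: {
        layerId: 12,
        bounds: { top: 0, left: 0, bottom: 256, right: 256 },
        feather: 0,
        antiAlias: true
    }
};

// fillSelection: 0-255 RGB color, an upper-case blend mode string that the
// handler lower-cases for batchPlay, and opacity in percent.
const fillSelectionCommand = {
    action: "fillSelection",
    options: {
        layerId: 12,
        color: { red: 255, green: 0, blue: 0 },
        blendMode: "NORMAL",
        opacity: 100
    }
};
```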
--------------------------------------------------------------------------------
/uxp/ps/commands/layer_styles.js:
--------------------------------------------------------------------------------
```javascript
1 | /* MIT License
2 | *
3 | * Copyright (c) 2025 Mike Chambers
4 | *
5 | * Permission is hereby granted, free of charge, to any person obtaining a copy
6 | * of this software and associated documentation files (the "Software"), to deal
7 | * in the Software without restriction, including without limitation the rights
8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | * copies of the Software, and to permit persons to whom the Software is
10 | * furnished to do so, subject to the following conditions:
11 | *
12 | * The above copyright notice and this permission notice shall be included in all
13 | * copies or substantial portions of the Software.
14 | *
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | * SOFTWARE.
22 | */
23 |
24 | const { action } = require("photoshop");
25 |
26 | const {
27 | selectLayer,
28 | findLayer,
29 | execute
30 | } = require("./utils")
31 |
32 | const addDropShadowLayerStyle = async (command) => {
33 |
34 | let options = command.options;
35 | let layerId = options.layerId;
36 |
37 | let layer = findLayer(layerId);
38 |
39 | if (!layer) {
40 | throw new Error(
41 | `addDropShadowLayerStyle : Could not find layerId : ${layerId}`
42 | );
43 | }
44 |
45 | await execute(async () => {
46 | selectLayer(layer, true);
47 |
48 | let commands = [
49 | // Set Layer Styles of current layer
50 | {
51 | _obj: "set",
52 | _target: [
53 | {
54 | _property: "layerEffects",
55 | _ref: "property",
56 | },
57 | {
58 | _enum: "ordinal",
59 | _ref: "layer",
60 | _value: "targetEnum",
61 | },
62 | ],
63 | to: {
64 | _obj: "layerEffects",
65 | dropShadow: {
66 | _obj: "dropShadow",
67 | antiAlias: false,
68 | blur: {
69 | _unit: "pixelsUnit",
70 | _value: options.size,
71 | },
72 | chokeMatte: {
73 | _unit: "pixelsUnit",
74 | _value: options.spread,
75 | },
76 | color: {
77 | _obj: "RGBColor",
78 | blue: options.color.blue,
79 | grain: options.color.green,
80 | red: options.color.red,
81 | },
82 | distance: {
83 | _unit: "pixelsUnit",
84 | _value: options.distance,
85 | },
86 | enabled: true,
87 | layerConceals: true,
88 | localLightingAngle: {
89 | _unit: "angleUnit",
90 | _value: options.angle,
91 | },
92 | mode: {
93 | _enum: "blendMode",
94 | _value: options.blendMode.toLowerCase(),
95 | },
96 | noise: {
97 | _unit: "percentUnit",
98 | _value: 0.0,
99 | },
100 | opacity: {
101 | _unit: "percentUnit",
102 | _value: options.opacity,
103 | },
104 | present: true,
105 | showInDialog: true,
106 | transferSpec: {
107 | _obj: "shapeCurveType",
108 | name: "Linear",
109 | },
110 | useGlobalAngle: true,
111 | },
112 | globalLightingAngle: {
113 | _unit: "angleUnit",
114 | _value: options.angle,
115 | },
116 | scale: {
117 | _unit: "percentUnit",
118 | _value: 100.0,
119 | },
120 | },
121 | },
122 | ];
123 |
124 | await action.batchPlay(commands, {});
125 | });
126 | };
127 |
128 | const addStrokeLayerStyle = async (command) => {
129 | const options = command.options
130 |
131 | const layerId = options.layerId
132 |
133 | let layer = findLayer(layerId)
134 |
135 | if (!layer) {
136 | throw new Error(
137 | `addStrokeLayerStyle : Could not find layerId : ${layerId}`
138 | );
139 | }
140 |
141 | let position = "centeredFrame"
142 |
143 | if (options.position == "INSIDE") {
144 | position = "insetFrame"
145 | } else if (options.position == "OUTSIDE") {
146 | position = "outsetFrame"
147 | }
148 |
149 |
150 | await execute(async () => {
151 | selectLayer(layer, true);
152 |
153 | let strokeColor = options.color
154 | let commands = [
155 | // Set Layer Styles of current layer
156 | {
157 | "_obj": "set",
158 | "_target": [
159 | {
160 | "_property": "layerEffects",
161 | "_ref": "property"
162 | },
163 | {
164 | "_enum": "ordinal",
165 | "_ref": "layer",
166 | "_value": "targetEnum"
167 | }
168 | ],
169 | "to": {
170 | "_obj": "layerEffects",
171 | "frameFX": {
172 | "_obj": "frameFX",
173 | "color": {
174 | "_obj": "RGBColor",
175 | "blue": strokeColor.blue,
176 | "grain": strokeColor.green,
177 | "red": strokeColor.red
178 | },
179 | "enabled": true,
180 | "mode": {
181 | "_enum": "blendMode",
182 | "_value": options.blendMode.toLowerCase()
183 | },
184 | "opacity": {
185 | "_unit": "percentUnit",
186 | "_value": options.opacity
187 | },
188 | "overprint": false,
189 | "paintType": {
190 | "_enum": "frameFill",
191 | "_value": "solidColor"
192 | },
193 | "present": true,
194 | "showInDialog": true,
195 | "size": {
196 | "_unit": "pixelsUnit",
197 | "_value": options.size
198 | },
199 | "style": {
200 | "_enum": "frameStyle",
201 | "_value": position
202 | }
203 | },
204 | "scale": {
205 | "_unit": "percentUnit",
206 | "_value": 100.0
207 | }
208 | }
209 | }
210 | ];
211 |
212 | await action.batchPlay(commands, {});
213 | });
214 | }
215 |
216 | const createGradientLayerStyle = async (command) => {
217 |
218 | let options = command.options;
219 | let layerId = options.layerId;
220 |
221 | let layer = findLayer(layerId);
222 |
223 | if (!layer) {
224 | throw new Error(
225 |             `createGradientLayerStyle : Could not find layerId : ${layerId}`
226 | );
227 | }
228 |
229 | await execute(async () => {
230 | selectLayer(layer, true);
231 |
232 | let angle = options.angle;
233 | let colorStops = options.colorStops;
234 | let opacityStops = options.opacityStops;
235 |
236 | let colors = [];
237 | for (let c of colorStops) {
238 | colors.push({
239 | _obj: "colorStop",
240 | color: {
241 | _obj: "RGBColor",
242 | blue: c.color.blue,
243 | grain: c.color.green,
244 | red: c.color.red,
245 | },
246 | location: Math.round((c.location / 100) * 4096),
247 | midpoint: c.midpoint,
248 | type: {
249 | _enum: "colorStopType",
250 | _value: "userStop",
251 | },
252 | });
253 | }
254 |
255 | let opacities = [];
256 | for (let o of opacityStops) {
257 | opacities.push({
258 | _obj: "transferSpec",
259 | location: Math.round((o.location / 100) * 4096),
260 | midpoint: o.midpoint,
261 | opacity: {
262 | _unit: "percentUnit",
263 | _value: o.opacity,
264 | },
265 | });
266 | }
267 |
268 | let commands = [
269 | // Make fill layer
270 | {
271 | _obj: "make",
272 | _target: [
273 | {
274 | _ref: "contentLayer",
275 | },
276 | ],
277 | using: {
278 | _obj: "contentLayer",
279 | type: {
280 | _obj: "gradientLayer",
281 | angle: {
282 | _unit: "angleUnit",
283 | _value: angle,
284 | },
285 | gradient: {
286 | _obj: "gradientClassEvent",
287 | colors: colors,
288 | gradientForm: {
289 | _enum: "gradientForm",
290 | _value: "customStops",
291 | },
292 | interfaceIconFrameDimmed: 4096.0,
293 | name: "Custom",
294 | transparency: opacities,
295 | },
296 | gradientsInterpolationMethod: {
297 | _enum: "gradientInterpolationMethodType",
298 | _value: "smooth",
299 | },
300 | type: {
301 | _enum: "gradientType",
302 | _value: options.type.toLowerCase(),
303 | },
304 | },
305 | },
306 | },
307 | ];
308 |
309 | await action.batchPlay(commands, {});
310 | });
311 | };
312 |
313 |
314 |
315 | const commandHandlers = {
316 | createGradientLayerStyle,
317 | addStrokeLayerStyle,
318 | addDropShadowLayerStyle
319 | };
320 |
321 | module.exports = {
322 | commandHandlers
323 | };
```
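Note: the layer style handlers follow the same shape; `createGradientLayerStyle` is the most involved, taking color and opacity stops whose `location` is a 0-100 percentage that the handler scales to Photoshop's 0-4096 gradient range. A hypothetical payload (values invented) is sketched below.

```javascript
// Hypothetical payload (values invented) for createGradientLayerStyle above.
const gradientCommand = {
    action: "createGradientLayerStyle",
    options: {
        layerId: 7,
        angle: 90,
        type: "LINEAR",    // lower-cased into the gradientType enum value
        colorStops: [
            { color: { red: 0, green: 0, blue: 0 }, location: 0, midpoint: 50 },
            { color: { red: 255, green: 255, blue: 255 }, location: 100, midpoint: 50 }
        ],
        opacityStops: [
            { opacity: 100, location: 0, midpoint: 50 },
            { opacity: 100, location: 100, midpoint: 50 }
        ]
    }
};
```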
--------------------------------------------------------------------------------
/uxp/pr/commands/utils.js:
--------------------------------------------------------------------------------
```javascript
1 | /* MIT License
2 | *
3 | * Copyright (c) 2025 Mike Chambers
4 | *
5 | * Permission is hereby granted, free of charge, to any person obtaining a copy
6 | * of this software and associated documentation files (the "Software"), to deal
7 | * in the Software without restriction, including without limitation the rights
8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | * copies of the Software, and to permit persons to whom the Software is
10 | * furnished to do so, subject to the following conditions:
11 | *
12 | * The above copyright notice and this permission notice shall be included in all
13 | * copies or substantial portions of the Software.
14 | *
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | * SOFTWARE.
22 | */
23 |
24 | const app = require("premierepro");
25 | const { TRACK_TYPE, TICKS_PER_SECOND } = require("./consts.js");
26 |
27 | const _getSequenceFromId = async (id) => {
28 | let project = await app.Project.getActiveProject();
29 |
30 | let guid = app.Guid.fromString(id);
31 | let sequence = await project.getSequence(guid);
32 |
33 | if (!sequence) {
34 | throw new Error(
35 | `_getSequenceFromId : Could not find sequence with id : ${id}`
36 | );
37 | }
38 |
39 | return sequence;
40 | };
41 |
42 | const _setActiveSequence = async (sequence) => {
43 | let project = await app.Project.getActiveProject();
44 | await project.setActiveSequence(sequence);
45 |
46 | let item = await findProjectItem(sequence.name, project);
47 | await app.SourceMonitor.openProjectItem(item);
48 | };
49 |
50 | const setParam = async (trackItem, componentName, paramName, value) => {
51 | const project = await app.Project.getActiveProject();
52 |
53 | let param = await getParam(trackItem, componentName, paramName);
54 |
55 | let keyframe = await param.createKeyframe(value);
56 |
57 | execute(() => {
58 | let action = param.createSetValueAction(keyframe);
59 | return [action];
60 | }, project);
61 | };
62 |
63 | const getParam = async (trackItem, componentName, paramName) => {
64 | let components = await trackItem.getComponentChain();
65 |
66 | const count = components.getComponentCount();
67 | for (let i = 0; i < count; i++) {
68 | const component = components.getComponentAtIndex(i);
69 |
70 | //search for match name
71 | //component name AE.ADBE Opacity
72 | const matchName = await component.getMatchName();
73 |
74 | if (matchName == componentName) {
75 | console.log(matchName);
76 | let pCount = component.getParamCount();
77 |
78 | for (let j = 0; j < pCount; j++) {
79 | const param = component.getParam(j);
80 |
81 | console.log(param.type);
82 | console.log(param);
83 | if (param.displayName == paramName) {
84 | return param;
85 | }
86 | }
87 | }
88 | }
89 | };
90 |
91 | const addEffect = async (trackItem, effectName) => {
92 | let project = await app.Project.getActiveProject();
93 | const effect = await app.VideoFilterFactory.createComponent(effectName);
94 |
95 | let componentChain = await trackItem.getComponentChain();
96 |
97 | execute(() => {
98 |         let action = componentChain.createAppendComponentAction(effect, 0); // TODO: second argument isn't needed
99 | return [action];
100 | }, project);
101 | };
102 |
103 | /*
104 | const findProjectItem2 = async (itemName, project) => {
105 | let root = await project.getRootItem();
106 | let rootItems = await root.getItems();
107 |
108 | let insertItem;
109 | for (const item of rootItems) {
110 | if (item.name == itemName) {
111 | insertItem = item;
112 | break;
113 | }
114 | }
115 |
116 | if (!insertItem) {
117 | throw new Error(
118 | `addItemToSequence : Could not find item named ${itemName}`
119 | );
120 | }
121 |
122 | return insertItem;
123 | };
124 | */
125 |
126 | const findProjectItem = async (itemName, project) => {
127 | let root = await project.getRootItem();
128 |
129 | const searchItems = async (parentItem) => {
130 | let items = await parentItem.getItems();
131 |
132 | // First, check items at this level
133 | for (const item of items) {
134 | if (item.name === itemName) {
135 | return item;
136 | }
137 | }
138 |
139 | // If not found, search recursively in bins/folders
140 | for (const item of items) {
141 | const folderItem = app.FolderItem.cast(item);
142 | if (folderItem) {
143 | // This is a bin/folder, search inside it
144 | const foundItem = await searchItems(folderItem);
145 | if (foundItem) {
146 | return foundItem;
147 | }
148 | }
149 | }
150 |
151 | return null; // Not found at this level or in any sub-folders
152 | };
153 |
154 | const insertItem = await searchItems(root);
155 |
156 | if (!insertItem) {
157 | throw new Error(
158 | `addItemToSequence : Could not find item named ${itemName}`
159 | );
160 | }
161 |
162 | return insertItem;
163 | };
164 |
165 |
166 | const execute = (getActions, project) => {
167 | try {
168 | project.lockedAccess(() => {
169 | project.executeTransaction((compoundAction) => {
170 | let actions = getActions();
171 |
172 | for (const a of actions) {
173 | compoundAction.addAction(a);
174 | }
175 | });
176 | });
177 | } catch (e) {
178 | throw new Error(`Error executing locked transaction : ${e}`);
179 | }
180 | };
181 |
182 | const getTracks = async (sequence, trackType) => {
183 | let count;
184 |
185 | if (trackType === TRACK_TYPE.VIDEO) {
186 | count = await sequence.getVideoTrackCount();
187 | } else if (trackType === TRACK_TYPE.AUDIO) {
188 | count = await sequence.getAudioTrackCount();
189 | }
190 |
191 | let tracks = [];
192 | for (let i = 0; i < count; i++) {
193 | let track;
194 |
195 | if (trackType === TRACK_TYPE.VIDEO) {
196 | track = await sequence.getVideoTrack(i);
197 | } else if (trackType === TRACK_TYPE.AUDIO) {
198 | track = await sequence.getAudioTrack(i);
199 | }
200 |
201 | let out = {
202 | index: i,
203 | tracks: [],
204 | };
205 |
206 | let clips = await track.getTrackItems(1, false);
207 |
208 | if (clips.length === 0) {
209 | continue;
210 | }
211 |
212 | let k = 0;
213 | for (const c of clips) {
214 | let startTimeTicks = (await c.getStartTime()).ticks;
215 | let endTimeTicks = (await c.getEndTime()).ticks;
216 | let durationTicks = (await c.getDuration()).ticks;
217 | let durationSeconds = (await c.getDuration()).seconds;
218 | let name = (await c.getProjectItem()).name;
219 | let type = await c.getType();
220 | let index = k++;
221 |
222 | out.tracks.push({
223 | startTimeTicks,
224 | endTimeTicks,
225 | durationTicks,
226 | durationSeconds,
227 | name,
228 | type,
229 | index,
230 | });
231 | }
232 |
233 | tracks.push(out);
234 | }
235 | return tracks;
236 | };
237 |
238 | const getSequences = async () => {
239 | let project = await app.Project.getActiveProject();
240 | let active = await project.getActiveSequence();
241 |
242 | let sequences = await project.getSequences();
243 |
244 | let out = [];
245 | for (const sequence of sequences) {
246 | let size = await sequence.getFrameSize();
247 | //let settings = await sequence.getSettings()
248 |
249 | //let projectItem = await sequence.getProjectItem()
250 | //let name = projectItem.name
251 | let name = sequence.name;
252 | let id = sequence.guid.toString();
253 |
254 | let videoTracks = await getTracks(sequence,TRACK_TYPE.VIDEO);
255 | let audioTracks = await getTracks(sequence, TRACK_TYPE.AUDIO);
256 |
257 | let isActive = active == sequence;
258 |
259 |
260 | let timebase = await sequence.getTimebase()
261 | let fps = TICKS_PER_SECOND / timebase
262 |
263 | let endTime = await sequence.getEndTime()
264 | let durationSeconds = await endTime.seconds
265 | let durationTicks = await endTime.ticksNumber
266 | let ticksPerSecond = TICKS_PER_SECOND
267 |
268 | out.push({
269 | isActive,
270 | name,
271 | id,
272 | frameSize: { width: size.width, height: size.height },
273 | videoTracks,
274 | audioTracks,
275 | timebase,
276 | fps,
277 | durationSeconds,
278 | durationTicks,
279 | ticksPerSecond
280 | });
281 | }
282 |
283 | return out;
284 | };
285 |
286 | const getTrack = async (sequence, trackIndex, clipIndex, trackType) => {
287 | let trackItems = await getTrackItems(sequence, trackIndex, trackType);
288 |
289 | let trackItem;
290 | let i = 0;
291 | for (const t of trackItems) {
292 | let index = i++;
293 | if (index === clipIndex) {
294 | trackItem = t;
295 | break;
296 | }
297 | }
298 | if (!trackItem) {
299 | throw new Error(
300 | `getTrack : trackItemIndex [${clipIndex}] does not exist for track type [${trackType}]`
301 | );
302 | }
303 |
304 | return trackItem;
305 | };
306 |
307 | /*
308 | const getAudioTrack = async (sequence, trackIndex, clipIndex) => {
309 |
310 | let trackItems = await getAudioTrackItems(sequence, trackIndex)
311 |
312 | let trackItem;
313 | let i = 0
314 | for(const t of trackItems) {
315 | let index = i++
316 | if(index === clipIndex) {
317 | trackItem = t
318 | break
319 | }
320 | }
321 | if(!trackItem) {
322 | throw new Error(`getAudioTrack : trackItemIndex [${clipIndex}] does not exist`)
323 | }
324 |
325 | return trackItem
326 | }
327 | */
328 |
329 | const getTrackItems = async (sequence, trackIndex, trackType) => {
330 | let track;
331 |
332 | if (trackType === TRACK_TYPE.AUDIO) {
333 | track = await sequence.getAudioTrack(trackIndex);
334 | } else if (trackType === TRACK_TYPE.VIDEO) {
335 | track = await sequence.getVideoTrack(trackIndex);
336 | }
337 |
338 | if (!track) {
339 | throw new Error(
340 |             `getTrackItems : track [${trackIndex}] does not exist. Type : [${trackType}]`
341 | );
342 | }
343 |
344 | let trackItems = await track.getTrackItems(1, false);
345 |
346 | return trackItems;
347 | };
348 |
349 | /*
350 | const getAudioTrackItems = async (sequence, trackIndex) => {
351 | let audioTrack = await sequence.getAudioTrack(trackIndex)
352 |
353 | if(!audioTrack) {
354 | throw new Error(`getAudioTrackItems : getAudioTrackItems [${trackIndex}] does not exist`)
355 | }
356 |
357 | let trackItems = await audioTrack.getTrackItems(1, false)
358 |
359 | return trackItems
360 | }
361 |
362 | const getVideoTrackItems = async (sequence, trackIndex) => {
363 | let videoTrack = await sequence.getVideoTrack(trackIndex)
364 |
365 | if(!videoTrack) {
366 | throw new Error(`getVideoTrackItems : videoTrackIndex [${trackIndex}] does not exist`)
367 | }
368 |
369 | let trackItems = await videoTrack.getTrackItems(1, false)
370 |
371 | return trackItems
372 | }
373 | */
374 | /*
375 | const getVideoTrack = async (sequence, trackIndex, clipIndex) => {
376 |
377 | let trackItems = await getVideoTrackItems(sequence, trackIndex)
378 |
379 | let trackItem;
380 | let i = 0
381 | for(const t of trackItems) {
382 | let index = i++
383 | if(index === clipIndex) {
384 | trackItem = t
385 | break
386 | }
387 | }
388 | if(!trackItem) {
389 | throw new Error(`getVideoTrack : clipIndex [${clipIndex}] does not exist`)
390 | }
391 |
392 | return trackItem
393 | }
394 | */
395 |
396 | module.exports = {
397 | getTrackItems,
398 | _getSequenceFromId,
399 | _setActiveSequence,
400 | setParam,
401 | getParam,
402 | addEffect,
403 | findProjectItem,
404 | execute,
405 | getTracks,
406 | getSequences,
407 | getTrack,
408 | };
409 |
```
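Note: the Premiere Pro helpers split lookups (`_getSequenceFromId`, `getTrack`, `getParam`) from writes, which are funneled through `execute()` so they run inside `lockedAccess` / `executeTransaction`. A hypothetical usage sketch follows; the `"AE.ADBE Opacity"` match name comes from the comment in `getParam()` above, but the `"Opacity"` display name and the overall call sequence are assumptions, and the code requires the UXP `premierepro` environment.

```javascript
// Hypothetical sketch (not in the repo): set a clip's opacity by combining
// the helpers above. Only runs inside a Premiere Pro UXP panel.
const { _getSequenceFromId, getTrack, setParam } = require("./utils");
const { TRACK_TYPE } = require("./consts.js");

const setClipOpacity = async (sequenceId, trackIndex, clipIndex, opacity) => {
    const sequence = await _getSequenceFromId(sequenceId);

    // trackIndex selects the video track, clipIndex the clip on that track.
    const trackItem = await getTrack(sequence, trackIndex, clipIndex, TRACK_TYPE.VIDEO);

    // setParam() locates the component/param, creates a keyframe with the
    // new value, and applies it inside a locked transaction via execute().
    await setParam(trackItem, "AE.ADBE Opacity", "Opacity", opacity);
};
```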
--------------------------------------------------------------------------------
/cep/com.mikechambers.ai/commands.js:
--------------------------------------------------------------------------------
```javascript
1 | /* commands.js
2 | * Illustrator command handlers
3 | */
4 |
5 |
6 | const getDocuments = async (command) => {
7 | const script = `
8 | (function() {
9 | try {
10 | var result = (function() {
11 | if (app.documents.length > 0) {
12 | var activeDoc = app.activeDocument;
13 | var docs = [];
14 |
15 | for (var i = 0; i < app.documents.length; i++) {
16 | var doc = app.documents[i];
17 | docs.push($.global.createDocumentInfo(doc, activeDoc));
18 | }
19 |
20 | return docs;
21 | } else {
22 | return [];
23 | }
24 | })();
25 |
26 | if (result === undefined) {
27 | return 'null';
28 | }
29 |
30 | return JSON.stringify(result);
31 | } catch(e) {
32 | return JSON.stringify({
33 | error: e.toString(),
34 | line: e.line || 'unknown'
35 | });
36 | }
37 | })();
38 | `;
39 |
40 | let result = await executeCommand(script);
41 | return createPacket(result);
42 | }
43 |
44 | const exportPNG = async (command) => {
45 | const options = command.options || {};
46 |
47 | // Extract all options into variables
48 | const path = options.path;
49 | const transparency = options.transparency !== undefined ? options.transparency : true;
50 | const antiAliasing = options.antiAliasing !== undefined ? options.antiAliasing : true;
51 | const artBoardClipping = options.artBoardClipping !== undefined ? options.artBoardClipping : true;
52 | const horizontalScale = options.horizontalScale || 100;
53 | const verticalScale = options.verticalScale || 100;
54 | const exportType = options.exportType || 'PNG24';
55 | const matte = options.matte;
56 | const matteColor = options.matteColor;
57 |
58 | // Validate required path parameter
59 | if (!path) {
60 | return createPacket(JSON.stringify({
61 | error: "Path is required for PNG export"
62 | }));
63 | }
64 |
65 | const script = `
66 | (function() {
67 | try {
68 | var result = (function() {
69 | if (app.documents.length === 0) {
70 | return { error: "No document is currently open" };
71 | }
72 |
73 | var doc = app.activeDocument;
74 | var exportPath = "${path}";
75 |
76 | // Export options from variables
77 | var exportOptions = {
78 | transparency: ${transparency},
79 | antiAliasing: ${antiAliasing},
80 | artBoardClipping: ${artBoardClipping},
81 | horizontalScale: ${horizontalScale},
82 | verticalScale: ${verticalScale},
83 | exportType: "${exportType}"
84 | };
85 |
86 | ${matte !== undefined ? `exportOptions.matte = ${matte};` : ''}
87 | ${matteColor ? `exportOptions.matteColor = ${JSON.stringify(matteColor)};` : ''}
88 |
89 | // Use the global helper function if available, otherwise inline export
90 | if (typeof $.global.exportToPNG === 'function') {
91 | return $.global.exportToPNG(doc, exportPath, exportOptions);
92 | } else {
93 | // Inline export logic
94 | try {
95 | // Create PNG export options
96 | var pngOptions = exportOptions.exportType === 'PNG8' ?
97 | new ExportOptionsPNG8() : new ExportOptionsPNG24();
98 |
99 | pngOptions.transparency = exportOptions.transparency;
100 | pngOptions.antiAliasing = exportOptions.antiAliasing;
101 | pngOptions.artBoardClipping = exportOptions.artBoardClipping;
102 | pngOptions.horizontalScale = exportOptions.horizontalScale;
103 | pngOptions.verticalScale = exportOptions.verticalScale;
104 |
105 | ${matte !== undefined ? `pngOptions.matte = ${matte};` : ''}
106 |
107 | ${matteColor ? `
108 | // Set matte color
109 | pngOptions.matteColor.red = ${matteColor.red};
110 | pngOptions.matteColor.green = ${matteColor.green};
111 | pngOptions.matteColor.blue = ${matteColor.blue};
112 | ` : ''}
113 |
114 | // Create file object
115 | var exportFile = new File(exportPath);
116 |
117 | // Determine export type
118 | var exportType = exportOptions.exportType === 'PNG8' ?
119 | ExportType.PNG8 : ExportType.PNG24;
120 |
121 | // Export the file
122 | doc.exportFile(exportFile, exportType, pngOptions);
123 |
124 | return {
125 | success: true,
126 | filePath: exportFile.fsName,
127 | fileExists: exportFile.exists,
128 | options: exportOptions,
129 | documentName: doc.name
130 | };
131 |
132 | } catch(exportError) {
133 | return {
134 | success: false,
135 | error: exportError.toString(),
136 | filePath: exportPath,
137 | options: exportOptions,
138 | documentName: doc.name
139 | };
140 | }
141 | }
142 | })();
143 |
144 | if (result === undefined) {
145 | return 'null';
146 | }
147 |
148 | return JSON.stringify(result);
149 | } catch(e) {
150 | return JSON.stringify({
151 | error: e.toString(),
152 | line: e.line || 'unknown'
153 | });
154 | }
155 | })();
156 | `;
157 |
158 | let result = await executeCommand(script);
159 | return createPacket(result);
160 | }
161 |
162 | const openFile = async (command) => {
163 | const options = command.options || {};
164 |
165 | // Extract path parameter
166 | const path = options.path;
167 |
168 | // Validate required path parameter
169 | if (!path) {
170 | return createPacket(JSON.stringify({
171 | error: "Path is required to open an Illustrator file"
172 | }));
173 | }
174 |
175 | const script = `
176 | (function() {
177 | try {
178 | var result = (function() {
179 | var filePath = "${path}";
180 |
181 | try {
182 | // Create file object
183 | var fileToOpen = new File(filePath);
184 |
185 | // Check if file exists
186 | if (!fileToOpen.exists) {
187 | return {
188 | success: false,
189 | error: "File does not exist at the specified path",
190 | filePath: filePath
191 | };
192 | }
193 |
194 | // Open the document
195 | var doc = app.open(fileToOpen);
196 |
197 | return {
198 | success: true,
199 | };
200 |
201 | } catch(openError) {
202 | return {
203 | success: false,
204 | error: openError.toString(),
205 | filePath: filePath
206 | };
207 | }
208 | })();
209 |
210 | if (result === undefined) {
211 | return 'null';
212 | }
213 |
214 | return JSON.stringify(result);
215 | } catch(e) {
216 | return JSON.stringify({
217 | error: e.toString(),
218 | line: e.line || 'unknown'
219 | });
220 | }
221 | })();
222 | `;
223 |
224 | let result = await executeCommand(script);
225 | return createPacket(result);
226 | };
227 |
228 | const getActiveDocumentInfo = async (command) => {
229 | const script = `
230 | (function() {
231 | try {
232 | var result = (function() {
233 | if (app.documents.length > 0) {
234 | var doc = app.activeDocument;
235 | return $.global.createDocumentInfo(doc, doc);
236 | } else {
237 | return { error: "No document is currently open" };
238 | }
239 | })();
240 |
241 | if (result === undefined) {
242 | return 'null';
243 | }
244 |
245 | return JSON.stringify(result);
246 | } catch(e) {
247 | return JSON.stringify({
248 | error: e.toString(),
249 | line: e.line || 'unknown'
250 | });
251 | }
252 | })();
253 | `;
254 |
255 | let result = await executeCommand(script);
256 | return createPacket(result);
257 | }
258 |
259 | // Execute Illustrator command via ExtendScript
260 | function executeCommand(script) {
261 | return new Promise((resolve, reject) => {
262 | const csInterface = new CSInterface();
263 | csInterface.evalScript(script, (result) => {
264 | if (result === 'EvalScript error.') {
265 | reject(new Error('ExtendScript execution failed'));
266 | } else {
267 | try {
268 | resolve(JSON.parse(result));
269 | } catch (e) {
270 | resolve(result);
271 | }
272 | }
273 | });
274 | });
275 | }
276 |
277 |
278 | async function executeExtendScript(command) {
279 | const options = command.options
280 | const scriptString = options.scriptString;
281 |
282 | const script = `
283 | (function() {
284 | try {
285 | ${scriptString}
286 | } catch(e) {
287 | return JSON.stringify({
288 | error: e.toString(),
289 | line: e.line || 'unknown'
290 | });
291 | }
292 | })();
293 | `;
294 |
295 | const result = await executeCommand(script);
296 |
297 | return createPacket(result);
298 | }
299 |
300 | const createPacket = (result) => {
301 | return {
302 | content: [{
303 | type: "text",
304 | text: JSON.stringify(result, null, 2)
305 | }]
306 | };
307 | }
308 |
309 | const parseAndRouteCommand = async (command) => {
310 | let action = command.action;
311 |
312 | let f = commandHandlers[action];
313 |
314 | if (typeof f !== "function") {
315 | throw new Error(`Unknown Command: ${action}`);
316 | }
317 |
318 | console.log(f.name)
319 | return await f(command);
320 | };
321 |
322 |
323 | // Execute commands
324 | /*
325 | async function executeCommand(command) {
326 | switch(command.action) {
327 |
328 | case "getLayers":
329 | return await getLayers();
330 |
331 | case "executeExtendScript":
332 | return await executeExtendScript(command);
333 |
334 | default:
335 | throw new Error(`Unknown command: ${command.action}`);
336 | }
337 | }*/
338 |
339 | const commandHandlers = {
340 | executeExtendScript,
341 | getDocuments,
342 | getActiveDocumentInfo,
343 | exportPNG,
344 | openFile
345 | };
```
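For reference, every handler above receives a command packet with an `action` string and an `options` object, and returns its result through `createPacket`. A minimal sketch of routing a packet through `parseAndRouteCommand` (the file path is a placeholder; `openFile` only requires `options.path`):

```javascript
// Hypothetical command packet for the Illustrator CEP panel.
const command = {
    action: "openFile",
    options: {
        path: "/Users/me/Documents/logo.ai" // placeholder path
    }
};

// parseAndRouteCommand looks up commandHandlers[command.action] and awaits it.
parseAndRouteCommand(command)
    .then((packet) => console.log(packet.content[0].text))
    .catch((err) => console.error(err.message));
```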
--------------------------------------------------------------------------------
/uxp/ps/commands/core.js:
--------------------------------------------------------------------------------
```javascript
1 | /* MIT License
2 | *
3 | * Copyright (c) 2025 Mike Chambers
4 | *
5 | * Permission is hereby granted, free of charge, to any person obtaining a copy
6 | * of this software and associated documentation files (the "Software"), to deal
7 | * in the Software without restriction, including without limitation the rights
8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | * copies of the Software, and to permit persons to whom the Software is
10 | * furnished to do so, subject to the following conditions:
11 | *
12 | * The above copyright notice and this permission notice shall be included in all
13 | * copies or substantial portions of the Software.
14 | *
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | * SOFTWARE.
22 | */
23 |
24 | const { app, constants, action, imaging } = require("photoshop");
25 | const fs = require("uxp").storage.localFileSystem;
26 |
27 | const {
28 | _saveDocumentAs,
29 | parseColor,
30 | getAlignmentMode,
31 | getNewDocumentMode,
32 | selectLayer,
33 | findLayer,
34 | findLayerByName,
35 | execute,
36 | tokenify,
37 | hasActiveSelection,
38 | listOpenDocuments
39 | } = require("./utils");
40 |
41 | const { rasterizeLayer } = require("./layers").commandHandlers;
42 |
43 | const openFile = async (command) => {
44 | let options = command.options;
45 |
46 | await execute(async () => {
47 | let entry = null;
48 | try {
49 | entry = await fs.getEntryWithUrl("file:" + options.filePath);
50 | } catch (e) {
51 | throw new Error(
52 | "openFile: Could not create file entry. File probably does not exist."
53 | );
54 | }
55 |
56 | await app.open(entry);
57 | });
58 | };
59 |
60 | const placeImage = async (command) => {
61 | let options = command.options;
62 | let layerId = options.layerId;
63 | let layer = findLayer(layerId);
64 |
65 | if (!layer) {
66 | throw new Error(`placeImage : Could not find layerId : ${layerId}`);
67 | }
68 |
69 | await execute(async () => {
70 | selectLayer(layer, true);
71 | let layerId = layer.id;
72 |
73 | let imagePath = await tokenify(options.imagePath);
74 |
75 | let commands = [
76 | // Place
77 | {
78 | ID: layerId,
79 | _obj: "placeEvent",
80 | freeTransformCenterState: {
81 | _enum: "quadCenterState",
82 | _value: "QCSAverage",
83 | },
84 | null: {
85 | _kind: "local",
86 | _path: imagePath,
87 | },
88 | offset: {
89 | _obj: "offset",
90 | horizontal: {
91 | _unit: "pixelsUnit",
92 | _value: 0.0,
93 | },
94 | vertical: {
95 | _unit: "pixelsUnit",
96 | _value: 0.0,
97 | },
98 | },
99 | replaceLayer: {
100 | _obj: "placeEvent",
101 | to: {
102 | _id: layerId,
103 | _ref: "layer",
104 | },
105 | },
106 | },
107 | {
108 | _obj: "set",
109 | _target: [
110 | {
111 | _enum: "ordinal",
112 | _ref: "layer",
113 | _value: "targetEnum",
114 | },
115 | ],
116 | to: {
117 | _obj: "layer",
118 | name: layerId,
119 | },
120 | },
121 | ];
122 |
123 | await action.batchPlay(commands, {});
124 | await rasterizeLayer(command);
125 | });
126 | };
127 |
128 | const getDocumentImage = async (command) => {
129 | let out = await execute(async () => {
130 |
131 | const pixelsOpt = {
132 | applyAlpha: true
133 | };
134 |
135 | const imgObj = await imaging.getPixels(pixelsOpt);
136 |
137 | const base64Data = await imaging.encodeImageData({
138 | imageData: imgObj.imageData,
139 | base64: true,
140 | });
141 |
142 | const result = {
143 | base64Image: base64Data,
144 | dataUrl: `data:image/jpeg;base64,${base64Data}`,
145 | width: imgObj.imageData.width,
146 | height: imgObj.imageData.height,
147 | colorSpace: imgObj.imageData.colorSpace,
148 | components: imgObj.imageData.components,
149 | format: "jpeg",
150 | };
151 |
152 | imgObj.imageData.dispose();
153 | return result;
154 | });
155 |
156 | return out;
157 | };
158 |
159 | const getDocumentInfo = async (command) => {
160 | let doc = app.activeDocument;
161 | let path = doc.path;
162 |
163 | let out = {
164 | height: doc.height,
165 | width: doc.width,
166 | colorMode: doc.mode.toString(),
167 | pixelAspectRatio: doc.pixelAspectRatio,
168 | resolution: doc.resolution,
169 | path: path,
170 | saved: path.length > 0,
171 | hasUnsavedChanges: !doc.saved,
172 | };
173 |
174 | return out;
175 | };
176 |
177 | const cropDocument = async (command) => {
178 | let options = command.options;
179 |
180 | if (!hasActiveSelection()) {
181 | throw new Error("cropDocument : Requires an active selection");
182 | }
183 |
184 | return await execute(async () => {
185 | let commands = [
186 | // Crop
187 | {
188 | _obj: "crop",
189 | delete: true,
190 | },
191 | ];
192 |
193 | await action.batchPlay(commands, {});
194 | });
195 | };
196 |
197 | const removeBackground = async (command) => {
198 | let options = command.options;
199 | let layerId = options.layerId;
200 |
201 | let layer = findLayer(layerId);
202 |
203 | if (!layer) {
204 | throw new Error(
205 | `removeBackground : Could not find layerId : ${layerId}`
206 | );
207 | }
208 |
209 | await execute(async () => {
210 | selectLayer(layer, true);
211 |
212 | let commands = [
213 | // Remove Background
214 | {
215 | _obj: "removeBackground",
216 | },
217 | ];
218 |
219 | await action.batchPlay(commands, {});
220 | });
221 | };
222 |
223 | const alignContent = async (command) => {
224 | let options = command.options;
225 | let layerId = options.layerId;
226 |
227 | let layer = findLayer(layerId);
228 |
229 | if (!layer) {
230 | throw new Error(
231 | `alignContent : Could not find layerId : ${layerId}`
232 | );
233 | }
234 |
235 | if (!app.activeDocument.selection.bounds) {
236 | throw new Error(`alignContent : Requires an active selection`);
237 | }
238 |
239 | await execute(async () => {
240 | let m = getAlignmentMode(options.alignmentMode);
241 |
242 | selectLayer(layer, true);
243 |
244 | let commands = [
245 | {
246 | _obj: "align",
247 | _target: [
248 | {
249 | _enum: "ordinal",
250 | _ref: "layer",
251 | _value: "targetEnum",
252 | },
253 | ],
254 | alignToCanvas: false,
255 | using: {
256 | _enum: "alignDistributeSelector",
257 | _value: m,
258 | },
259 | },
260 | ];
261 | await action.batchPlay(commands, {});
262 | });
263 | };
264 |
265 | const generateImage = async (command) => {
266 | let options = command.options;
267 |
268 | await execute(async () => {
269 | let doc = app.activeDocument;
270 |
271 | await doc.selection.selectAll();
272 |
273 | let contentType = "none";
274 | const c = options.contentType.toLowerCase()
275 | if (c === "photo" || c === "art") {
276 | contentType = c;
277 | }
278 |
279 | let commands = [
280 | // Generate Image current document
281 | {
282 | _obj: "syntheticTextToImage",
283 | _target: [
284 | {
285 | _enum: "ordinal",
286 | _ref: "document",
287 | _value: "targetEnum",
288 | },
289 | ],
290 | documentID: doc.id,
291 | layerID: 0,
292 | prompt: options.prompt,
293 | serviceID: "clio",
294 | serviceOptionsList: {
295 | clio: {
296 | _obj: "clio",
297 | clio_advanced_options: {
298 | text_to_image_styles_options: {
299 | text_to_image_content_type: contentType,
300 | text_to_image_effects_count: 0,
301 | text_to_image_effects_list: [
302 | "none",
303 | "none",
304 | "none",
305 | ],
306 | },
307 | },
308 | dualCrop: true,
309 | gentech_workflow_name: "text_to_image",
310 | gi_ADVANCED: '{"enable_mts":true}',
311 | gi_CONTENT_PRESERVE: 0,
312 | gi_CROP: false,
313 | gi_DILATE: false,
314 | gi_ENABLE_PROMPT_FILTER: true,
315 | gi_GUIDANCE: 6,
316 | gi_MODE: "ginp",
317 | gi_NUM_STEPS: -1,
318 | gi_PROMPT: options.prompt,
319 | gi_SEED: -1,
320 | gi_SIMILARITY: 0,
321 | },
322 | },
323 | workflow: "text_to_image",
324 | workflowType: {
325 | _enum: "genWorkflow",
326 | _value: "text_to_image",
327 | },
328 | },
329 | // Rasterize current layer
330 | {
331 | _obj: "rasterizeLayer",
332 | _target: [
333 | {
334 | _enum: "ordinal",
335 | _ref: "layer",
336 | _value: "targetEnum",
337 | },
338 | ],
339 | },
340 | ];
341 | let o = await action.batchPlay(commands, {});
342 | let layerId = o[0].layerID;
343 |
344 | //let l = findLayerByName(options.prompt);
345 | let l = findLayer(layerId);
346 | l.name = options.layerName;
347 | });
348 | };
349 |
350 | const generativeFill = async (command) => {
351 | const options = command.options;
352 | const layerId = options.layerId;
353 | const prompt = options.prompt;
354 |
355 | const layer = findLayer(layerId);
356 |
357 | if (!layer) {
358 | throw new Error(
359 | `generativeFill : Could not find layerId : ${layerId}`
360 | );
361 | }
362 |
363 | if(!hasActiveSelection()) {
364 | throw new Error(
365 | `generativeFill : Requires an active selection.`
366 | );
367 | }
368 |
369 | await execute(async () => {
370 | let doc = app.activeDocument;
371 |
372 | let contentType = "none";
373 | const c = options.contentType.toLowerCase()
374 | if (c === "photo" || c === "art") {
375 | contentType = c;
376 | }
377 |
378 | let commands = [
379 | // Generative Fill current document
380 | {
381 | "_obj": "syntheticFill",
382 | "_target": [
383 | {
384 | "_enum": "ordinal",
385 | "_ref": "document",
386 | "_value": "targetEnum"
387 | }
388 | ],
389 | "documentID": doc.id,
390 | "layerID": layerId,
391 | "prompt": prompt,
392 | "serviceID": "clio",
393 | "serviceOptionsList": {
394 | "clio": {
395 | "_obj": "clio",
396 | "dualCrop": true,
397 | "gi_ADVANCED": "{\"enable_mts\":true}",
398 | "gi_CONTENT_PRESERVE": 0,
399 | "gi_CROP": false,
400 | "gi_DILATE": false,
401 | "gi_ENABLE_PROMPT_FILTER": true,
402 | "gi_GUIDANCE": 6,
403 | "gi_MODE": "tinp",
404 | "gi_NUM_STEPS": -1,
405 | "gi_PROMPT": prompt,
406 | "gi_SEED": -1,
407 | "gi_SIMILARITY": 0,
408 |
409 |
410 | clio_advanced_options: {
411 | text_to_image_styles_options: {
412 | text_to_image_content_type: contentType,
413 | text_to_image_effects_count: 0,
414 | text_to_image_effects_list: [
415 | "none",
416 | "none",
417 | "none",
418 | ],
419 | },
420 | },
421 |
422 | }
423 | },
424 | "serviceVersion": "clio3",
425 | "workflowType": {
426 | "_enum": "genWorkflow",
427 | "_value": "in_painting"
428 | },
429 | "workflow_to_active_service_identifier_map": {
430 | "gen_harmonize": "clio3",
431 | "generate_background": "clio3",
432 | "generate_similar": "clio3",
433 | "generativeUpscale": "fal_aura_sr",
434 | "in_painting": "clio3",
435 | "instruct_edit": "clio3",
436 | "out_painting": "clio3",
437 | "text_to_image": "clio3"
438 | }
439 | }
440 | ];
441 |
442 |
443 | let o = await action.batchPlay(commands, {});
444 | let id = o[0].layerID;
445 |
446 | //let l = findLayerByName(options.prompt);
447 | let l = findLayer(id);
448 | l.name = options.layerName;
449 | });
450 | };
451 |
452 | const saveDocument = async (command) => {
453 | await execute(async () => {
454 | await app.activeDocument.save();
455 | });
456 | };
457 |
458 | const saveDocumentAs = async (command) => {
459 | let options = command.options;
460 |
461 | return await _saveDocumentAs(options.filePath, options.fileType);
462 | };
463 |
464 | const setActiveDocument = async (command) => {
465 |
466 | let options = command.options;
467 | let documentId = options.documentId;
468 | let docs = listOpenDocuments();
469 |
470 | for (let doc of docs) {
471 | if (doc.id === documentId) {
472 | await execute(async () => {
473 | app.activeDocument = doc;
474 | });
475 |
476 | return
477 | }
478 | }
479 | }
480 |
481 | const getDocuments = async (command) => {
482 | return listOpenDocuments()
483 | }
484 |
485 | const duplicateDocument = async (command) => {
486 | let options = command.options;
487 | let name = options.name
488 |
489 | await execute(async () => {
490 | const doc = app.activeDocument;
491 | await doc.duplicate(name)
492 | });
493 | };
494 |
495 | const createDocument = async (command) => {
496 | let options = command.options;
497 | let colorMode = getNewDocumentMode(command.options.colorMode);
498 | let fillColor = parseColor(options.fillColor);
499 |
500 | await execute(async () => {
501 | await app.createDocument({
502 | typename: "DocumentCreateOptions",
503 | width: options.width,
504 | height: options.height,
505 | resolution: options.resolution,
506 | mode: colorMode,
507 | fill: constants.DocumentFill.COLOR,
508 | fillColor: fillColor,
509 | profile: "sRGB IEC61966-2.1",
510 | });
511 |
512 | let background = findLayerByName("Background");
513 | background.allLocked = false;
514 | background.name = "Background";
515 | });
516 | };
517 |
518 | const executeBatchPlayCommand = async (command) => {
519 |     let options = command.options;
520 | let c = options.commands;
521 |
522 |
523 |
524 | let out = await execute(async () => {
525 | let o = await action.batchPlay(c, {});
526 | return o[0]
527 | });
528 |
529 | console.log(out)
530 | return out;
531 | }
532 |
533 | const commandHandlers = {
534 | generativeFill,
535 | executeBatchPlayCommand,
536 | setActiveDocument,
537 | getDocuments,
538 | duplicateDocument,
539 | getDocumentImage,
540 | openFile,
541 | placeImage,
542 | getDocumentInfo,
543 | cropDocument,
544 | removeBackground,
545 | alignContent,
546 | generateImage,
547 | saveDocument,
548 | saveDocumentAs,
549 | createDocument,
550 | };
551 |
552 | module.exports = {
553 | commandHandlers,
554 | };
555 |
```
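As a usage sketch, the handlers above can be driven directly with command objects. The exact `colorMode` and `fillColor` formats are defined by `getNewDocumentMode` and `parseColor` in `./utils`, so the values below are assumptions:

```javascript
const { commandHandlers } = require("./core");

const demo = async () => {
    // Create a new document; width/height/resolution map straight to
    // app.createDocument, while colorMode and fillColor are translated by utils.
    await commandHandlers.createDocument({
        options: {
            width: 1024,
            height: 768,
            resolution: 72,
            colorMode: "RGB",                              // assumed value
            fillColor: { red: 255, green: 255, blue: 255 } // assumed shape
        }
    });

    // getDocumentInfo ignores options and reports on the active document.
    const info = await commandHandlers.getDocumentInfo({});
    console.log(info.width, info.height, info.colorMode);
};
```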
--------------------------------------------------------------------------------
/uxp/pr/commands/core.js:
--------------------------------------------------------------------------------
```javascript
1 |
2 | const fs = require("uxp").storage.localFileSystem;
3 | const app = require("premierepro");
4 | const constants = require("premierepro").Constants;
5 |
6 | const {BLEND_MODES, TRACK_TYPE } = require("./consts.js")
7 |
8 | const {
9 | _getSequenceFromId,
10 | _setActiveSequence,
11 | setParam,
12 | getParam,
13 | addEffect,
14 | findProjectItem,
15 | execute,
16 | getTrack,
17 | getTrackItems
18 | } = require("./utils.js")
19 |
20 | const saveProject = async (command) => {
21 | let project = await app.Project.getActiveProject()
22 |
23 | project.save()
24 | }
25 |
26 | const saveProjectAs = async (command) => {
27 | let project = await app.Project.getActiveProject()
28 |
29 | const options = command.options;
30 | const filePath = options.filePath;
31 |
32 | project.saveAs(filePath)
33 | }
34 |
35 | const openProject = async (command) => {
36 |
37 | const options = command.options;
38 | const filePath = options.filePath;
39 |
40 | await app.Project.open(filePath);
41 | }
42 |
43 |
44 | const importMedia = async (command) => {
45 |
46 | let options = command.options
47 | let paths = command.options.filePaths
48 |
49 | let project = await app.Project.getActiveProject()
50 |
51 | let root = await project.getRootItem()
52 | let originalItems = await root.getItems()
53 |
54 | //import everything into root
55 | let rootFolderItems = await project.getRootItem()
56 |
57 |
58 | let success = await project.importFiles(paths, true, rootFolderItems)
60 |     //TODO: handle the case where the import does not succeed
60 |
61 | let updatedItems = await root.getItems()
62 |
63 | const addedItems = updatedItems.filter(
64 | updatedItem => !originalItems.some(originalItem => originalItem.name === updatedItem.name)
65 | );
66 |
67 | let addedProjectItems = [];
68 | for (const p of addedItems) {
69 | addedProjectItems.push({ name: p.name });
70 | }
71 |
72 | return { addedProjectItems };
73 | }
74 |
75 |
76 | //note: right now, we just always add to the active sequence. Need to add support
77 | //for specifying sequence
78 | const addMediaToSequence = async (command) => {
79 |
80 | let options = command.options
81 | let itemName = options.itemName
82 | let id = options.sequenceId
83 |
84 | let project = await app.Project.getActiveProject()
85 | let sequence = await _getSequenceFromId(id)
86 |
87 | let insertItem = await findProjectItem(itemName, project)
88 |
89 | let editor = await app.SequenceEditor.getEditor(sequence)
90 |
91 | const insertionTime = await app.TickTime.createWithTicks(options.insertionTimeTicks.toString());
92 | const videoTrackIndex = options.videoTrackIndex
93 | const audioTrackIndex = options.audioTrackIndex
94 |
95 | //not sure what this does
96 | const limitShift = false
97 |
98 | //let f = ((options.overwrite) ? editor.createOverwriteItemAction : editor.createInsertProjectItemAction).bind(editor)
99 | //let action = f(insertItem, insertionTime, videoTrackIndex, audioTrackIndex, limitShift)
100 | execute(() => {
101 | let action = editor.createOverwriteItemAction(insertItem, insertionTime, videoTrackIndex, audioTrackIndex)
102 | return [action]
103 | }, project)
104 | }
105 |
106 |
107 | const setAudioTrackMute = async (command) => {
108 |
109 | let options = command.options
110 | let id = options.sequenceId
111 |
112 | let sequence = await _getSequenceFromId(id)
113 |
114 | let track = await sequence.getTrack(options.audioTrackIndex, TRACK_TYPE.AUDIO)
115 | track.setMute(options.mute)
116 | }
117 |
118 |
119 |
120 | const setVideoClipProperties = async (command) => {
121 |
122 | const options = command.options
123 | let id = options.sequenceId
124 |
125 | let project = await app.Project.getActiveProject()
126 | let sequence = await _getSequenceFromId(id)
127 |
128 | if(!sequence) {
129 | throw new Error(`setVideoClipProperties : Requires an active sequence.`)
130 | }
131 |
132 | let trackItem = await getTrack(sequence, options.videoTrackIndex, options.trackItemIndex, TRACK_TYPE.VIDEO)
133 |
134 | let opacityParam = await getParam(trackItem, "AE.ADBE Opacity", "Opacity")
135 | let opacityKeyframe = await opacityParam.createKeyframe(options.opacity)
136 |
137 | let blendModeParam = await getParam(trackItem, "AE.ADBE Opacity", "Blend Mode")
138 |
139 | let mode = BLEND_MODES[options.blendMode.toUpperCase()]
140 | let blendModeKeyframe = await blendModeParam.createKeyframe(mode)
141 |
142 | execute(() => {
143 | let opacityAction = opacityParam.createSetValueAction(opacityKeyframe);
144 | let blendModeAction = blendModeParam.createSetValueAction(blendModeKeyframe);
145 | return [opacityAction, blendModeAction]
146 | }, project)
147 |
148 | // /AE.ADBE Opacity
149 | //Opacity
150 | //Blend Mode
151 |
152 | }
153 |
154 | const appendVideoFilter = async (command) => {
155 |
156 | let options = command.options
157 | let id = options.sequenceId
158 |
159 | let sequence = await _getSequenceFromId(id)
160 |
161 | if(!sequence) {
162 | throw new Error(`appendVideoFilter : Requires an active sequence.`)
163 | }
164 |
165 |     let trackItem = await getTrack(sequence, options.videoTrackIndex, options.trackItemIndex, TRACK_TYPE.VIDEO)
166 |
167 | let effectName = options.effectName
168 | let properties = options.properties
169 |
170 | let d = await addEffect(trackItem, effectName)
171 |
172 | for(const p of properties) {
173 | console.log(p.value)
174 | await setParam(trackItem, effectName, p.name, p.value)
175 | }
176 | }
177 |
178 |
179 | const setActiveSequence = async (command) => {
180 | let options = command.options
181 | let id = options.sequenceId
182 |
183 | let sequence = await _getSequenceFromId(id)
184 |
185 | await _setActiveSequence(sequence)
186 | }
187 |
188 | const createProject = async (command) => {
189 |
190 | let options = command.options
191 | let path = options.path
192 | let name = options.name
193 |
194 | if (!path.endsWith('/')) {
195 | path = path + '/';
196 | }
197 |
198 |     //todo: this will open a dialog if the directory doesn't exist
199 | let project = await app.Project.createProject(`${path}${name}.prproj`)
200 |
201 |
202 | if(!project) {
203 | throw new Error("createProject : Could not create project. Check that the directory path exists and try again.")
204 | }
205 |
206 | //create a default sequence and set it as active
207 | //let sequence = await project.createSequence("default")
208 | //await project.setActiveSequence(sequence)
209 | }
210 |
211 |
212 | const _exportFrame = async (sequence, filePath, seconds) => {
213 |
214 | const fileType = filePath.split('.').pop()
215 |
216 | let size = await sequence.getFrameSize()
217 |
218 | let p = window.path.parse(filePath)
219 | let t = app.TickTime.createWithSeconds(seconds)
220 |
221 | let out = await app.Exporter.exportSequenceFrame(sequence, t, p.base, p.dir, size.width, size.height)
222 |
223 | let ps = `${p.dir}${window.path.sep}${p.base}`
224 | let outPath = `${ps}.${fileType}`
225 |
226 | if(!out) {
227 | throw new Error(`exportFrame : Could not save frame to [${outPath}]`);
228 | }
229 |
230 | return outPath
231 | }
232 |
233 | const exportFrame = async (command) => {
234 | const options = command.options;
235 | let id = options.sequenceId;
236 | let filePath = options.filePath;
237 | let seconds = options.seconds;
238 |
239 | let sequence = await _getSequenceFromId(id);
240 |
241 | const outPath = await _exportFrame(sequence, filePath, seconds);
242 |
243 | return {"filePath": outPath}
244 | }
245 |
246 | const setClipDisabled = async (command) => {
247 |
248 | const options = command.options;
249 | const id = options.sequenceId;
250 | const trackIndex = options.trackIndex;
251 | const trackItemIndex = options.trackItemIndex;
252 | const trackType = options.trackType;
253 |
254 | let project = await app.Project.getActiveProject()
255 | let sequence = await _getSequenceFromId(id)
256 |
257 | if(!sequence) {
258 | throw new Error(`setClipDisabled : Requires an active sequence.`)
259 | }
260 |
261 | let trackItem = await getTrack(sequence, trackIndex, trackItemIndex, trackType)
262 |
263 | execute(() => {
264 | let action = trackItem.createSetDisabledAction(options.disabled)
265 | return [action]
266 | }, project)
267 |
268 | }
269 |
270 |
271 | const appendVideoTransition = async (command) => {
272 |
273 | let options = command.options
274 | let id = options.sequenceId
275 |
276 | let project = await app.Project.getActiveProject()
277 | let sequence = await _getSequenceFromId(id)
278 |
279 | if(!sequence) {
280 | throw new Error(`appendVideoTransition : Requires an active sequence.`)
281 | }
282 |
283 | let trackItem = await getTrack(sequence, options.videoTrackIndex, options.trackItemIndex,TRACK_TYPE.VIDEO)
284 |
285 | let transition = await app.TransitionFactory.createVideoTransition(options.transitionName);
286 |
287 | let transitionOptions = new app.AddTransitionOptions()
288 | transitionOptions.setApplyToStart(false)
289 |
290 | const time = await app.TickTime.createWithSeconds(options.duration)
291 | transitionOptions.setDuration(time)
292 | transitionOptions.setTransitionAlignment(options.clipAlignment)
293 |
294 | execute(() => {
295 | let action = trackItem.createAddVideoTransitionAction(transition, transitionOptions)
296 | return [action]
297 | }, project)
298 | }
299 |
300 |
301 | const getProjectInfo = async (command) => {
302 | return {}
303 | }
304 |
305 |
306 |
307 | const createSequenceFromMedia = async (command) => {
308 |
309 | let options = command.options
310 |
311 | let itemNames = options.itemNames
312 | let sequenceName = options.sequenceName
313 |
314 | let project = await app.Project.getActiveProject()
315 |
316 | let found = false
317 | try {
318 | await findProjectItem(sequenceName, project)
319 | found = true
320 | } catch {
321 | //do nothing
322 | }
323 |
324 | if(found) {
325 | throw Error(`createSequenceFromMedia : sequence name [${sequenceName}] is already in use`)
326 | }
327 |
328 | let items = []
329 | for (const name of itemNames) {
330 |
331 | //this is a little inefficient
332 | let insertItem = await findProjectItem(name, project)
333 | items.push(insertItem)
334 | }
335 |
336 |
337 | let root = await project.getRootItem()
338 |
339 | let sequence = await project.createSequenceFromMedia(sequenceName, items, root)
340 |
341 | await _setActiveSequence(sequence)
342 | }
343 |
344 | const setClipStartEndTimes = async (command) => {
345 | const options = command.options;
346 |
347 | const sequenceId = options.sequenceId;
348 | const trackIndex = options.trackIndex;
349 | const trackItemIndex = options.trackItemIndex;
350 | const startTimeTicks = options.startTimeTicks;
351 | const endTimeTicks = options.endTimeTicks;
352 | const trackType = options.trackType
353 |
354 | const sequence = await _getSequenceFromId(sequenceId)
355 | let trackItem = await getTrack(sequence, trackIndex, trackItemIndex, trackType)
356 |
357 | const startTick = await app.TickTime.createWithTicks(startTimeTicks.toString());
358 |     const endTick = await app.TickTime.createWithTicks(endTimeTicks.toString());
359 |
360 | let project = await app.Project.getActiveProject();
361 |
362 | execute(() => {
363 |
364 | let out = []
365 |
366 | out.push(trackItem.createSetStartAction(startTick));
367 | out.push(trackItem.createSetEndAction(endTick))
368 |
369 | return out
370 | }, project)
371 | }
372 |
373 | const closeGapsOnSequence = async(command) => {
374 | const options = command.options
375 | const sequenceId = options.sequenceId;
376 | const trackIndex = options.trackIndex;
377 | const trackType = options.trackType;
378 |
379 | let sequence = await _getSequenceFromId(sequenceId)
380 |
381 | let out = await _closeGapsOnSequence(sequence, trackIndex, trackType)
382 |
383 | return out
384 | }
385 |
386 | const _closeGapsOnSequence = async (sequence, trackIndex, trackType) => {
387 |
388 | let project = await app.Project.getActiveProject()
389 |
390 | let items = await getTrackItems(sequence, trackIndex, trackType)
391 |
392 | if(!items || items.length === 0) {
393 | return;
394 | }
395 |
396 | const f = async (item, targetPosition) => {
397 | let currentStart = await item.getStartTime()
398 |
399 | let a = await currentStart.ticksNumber
400 | let b = await targetPosition.ticksNumber
401 | let shiftAmount = (a - b)// How much to shift
402 |
403 | shiftAmount *= -1;
404 |
405 | let shiftTick = app.TickTime.createWithTicks(shiftAmount.toString())
406 |
407 | return shiftTick
408 | }
409 |
410 | let targetPosition = app.TickTime.createWithTicks("0")
411 |
412 |
413 | for(let i = 0; i < items.length; i++) {
414 | let item = items[i];
415 | let shiftTick = await f(item, targetPosition)
416 |
417 | execute(() => {
418 | let out = []
419 |
420 | out.push(item.createMoveAction(shiftTick))
421 |
422 | return out
423 | }, project)
424 |
425 | targetPosition = await item.getEndTime()
426 | }
427 | }
428 |
429 | //TODO: change API to take trackType?
430 |
431 | //TODO: pass in scope here
432 | const removeItemFromSequence = async (command) => {
433 | const options = command.options;
434 |
435 | const sequenceId = options.sequenceId;
436 | const trackIndex = options.trackIndex;
437 | const trackItemIndex = options.trackItemIndex;
438 | const rippleDelete = options.rippleDelete;
439 | const trackType = options.trackType
440 |
441 | let project = await app.Project.getActiveProject()
442 | let sequence = await _getSequenceFromId(sequenceId)
443 |
444 | if(!sequence) {
445 |         throw Error(`removeItemFromSequence : sequence with id [${sequenceId}] not found.`)
446 | }
447 |
448 | let item = await getTrack(sequence, trackIndex, trackItemIndex, trackType);
449 |
450 | let editor = await app.SequenceEditor.getEditor(sequence)
451 |
452 | let trackItemSelection = await sequence.getSelection();
453 | let items = await trackItemSelection.getTrackItems()
454 |
455 | for (let t of items) {
456 | await trackItemSelection.removeItem(t)
457 | }
458 |
459 | trackItemSelection.addItem(item, true)
460 |
461 | execute(() => {
462 | const shiftOverlapping = false
463 | let action = editor.createRemoveItemsAction(trackItemSelection, rippleDelete, constants.MediaType.ANY, shiftOverlapping )
464 | return [action]
465 | }, project)
466 | }
467 |
468 | const addMarkerToSequence = async (command) => {
469 | const options = command.options;
470 | const sequenceId = options.sequenceId;
471 | const markerName = options.markerName;
472 | const startTimeTicks = options.startTimeTicks;
473 | const durationTicks = options.durationTicks;
474 | const comments = options.comments;
475 |
476 | const sequence = await _getSequenceFromId(sequenceId)
477 |
478 | if(!sequence) {
479 | throw Error(`addMarkerToSequence : sequence with id [${sequenceId}] not found.`)
480 | }
481 |
482 | let markers = await app.Markers.getMarkers(sequence);
483 |
484 | let project = await app.Project.getActiveProject()
485 |
486 | execute(() => {
487 |
488 | let start = app.TickTime.createWithTicks(startTimeTicks.toString())
489 | let duration = app.TickTime.createWithTicks(durationTicks.toString())
490 |
491 | let action = markers.createAddMarkerAction(markerName, "WebLink", start, duration, comments)
492 | return [action]
493 | }, project)
494 |
495 | }
496 |
497 | const moveProjectItemsToBin = async (command) => {
498 | const options = command.options;
499 | const binName = options.binName;
500 | const projectItemNames = options.itemNames;
501 |
502 | const project = await app.Project.getActiveProject()
503 |
504 | const binFolderItem = await findProjectItem(binName, project)
505 |
506 | if(!binFolderItem) {
507 | throw Error(`moveProjectItemsToBin : Bin with name [${binName}] not found.`)
508 | }
509 |
510 | let folderItems = [];
511 |
512 | for(let name of projectItemNames) {
513 | let item = await findProjectItem(name, project)
514 |
515 | if(!item) {
516 | throw Error(`moveProjectItemsToBin : FolderItem with name [${name}] not found.`)
517 | }
518 |
519 | folderItems.push(item)
520 | }
521 |
522 | const rootFolderItem = await project.getRootItem()
523 |
524 | execute(() => {
525 |
526 | let actions = []
527 |
528 | for(let folderItem of folderItems) {
529 | let b = app.FolderItem.cast(binFolderItem)
530 | let action = rootFolderItem.createMoveItemAction(folderItem, b)
531 | actions.push(action)
532 | }
533 |
534 | return actions
535 | }, project)
536 |
537 | }
538 |
539 | const createBinInActiveProject = async (command) => {
540 | const options = command.options;
541 | const binName = options.binName;
542 |
543 | const project = await app.Project.getActiveProject()
544 | const folderItem = await project.getRootItem()
545 |
546 | execute(() => {
547 | let action = folderItem.createBinAction(binName, true)
548 | return [action]
549 | }, project)
550 | }
551 |
552 | const exportSequence = async (command) => {
553 | const options = command.options;
554 | const sequenceId = options.sequenceId;
555 | const outputPath = options.outputPath;
556 | const presetPath = options.presetPath;
557 |
558 | const manager = await app.EncoderManager.getManager();
559 |
560 | const sequence = await _getSequenceFromId(sequenceId);
561 |
562 | await manager.exportSequence(sequence, constants.ExportType.IMMEDIATELY, outputPath, presetPath);
563 | }
564 |
565 | const commandHandlers = {
566 | exportSequence,
567 | moveProjectItemsToBin,
568 | createBinInActiveProject,
569 | addMarkerToSequence,
570 | closeGapsOnSequence,
571 | removeItemFromSequence,
572 | setClipStartEndTimes,
573 | openProject,
574 | saveProjectAs,
575 | saveProject,
576 | getProjectInfo,
577 | setActiveSequence,
578 | exportFrame,
579 | setVideoClipProperties,
580 | createSequenceFromMedia,
581 | setAudioTrackMute,
582 | setClipDisabled,
583 | appendVideoTransition,
584 | appendVideoFilter,
585 | addMediaToSequence,
586 | importMedia,
587 | createProject,
588 | };
589 |
590 | module.exports = {
591 | commandHandlers
592 | }
```
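A hypothetical call into the Premiere handlers above; the sequence id would normally come from an earlier project query, and the tick values assume Premiere's timebase of 254,016,000,000 ticks per second:

```javascript
const { commandHandlers } = require("./core");

const demo = async () => {
    const sequenceId = "placeholder-sequence-guid"; // would come from a prior query

    // Add a 2 second marker starting at the 1 second mark.
    await commandHandlers.addMarkerToSequence({
        options: {
            sequenceId,
            markerName: "Review this cut",
            startTimeTicks: 254016000000,
            durationTicks: 2 * 254016000000,
            comments: "Added via the MCP bridge"
        }
    });
};
```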
--------------------------------------------------------------------------------
/uxp/ps/commands/layers.js:
--------------------------------------------------------------------------------
```javascript
1 | /* MIT License
2 | *
3 | * Copyright (c) 2025 Mike Chambers
4 | *
5 | * Permission is hereby granted, free of charge, to any person obtaining a copy
6 | * of this software and associated documentation files (the "Software"), to deal
7 | * in the Software without restriction, including without limitation the rights
8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | * copies of the Software, and to permit persons to whom the Software is
10 | * furnished to do so, subject to the following conditions:
11 | *
12 | * The above copyright notice and this permission notice shall be included in all
13 | * copies or substantial portions of the Software.
14 | *
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | * SOFTWARE.
22 | */
23 |
24 | const { app, constants, action, imaging } = require("photoshop");
25 | const fs = require("uxp").storage.localFileSystem;
26 |
27 | const {
28 | setVisibleAllLayers,
29 | findLayer,
30 | execute,
31 | parseColor,
32 | getAnchorPosition,
33 | getInterpolationMethod,
34 | getBlendMode,
35 | getJustificationMode,
36 | selectLayer,
37 | hasActiveSelection,
38 | _saveDocumentAs,
39 | convertFontSize,
40 | convertFromPhotoshopFontSize
41 | } = require("./utils");
42 |
43 |
44 | // Function to capture visibility state
45 | const _captureVisibilityState = (layers) => {
46 | const state = new Map();
47 |
48 | const capture = (layerSet) => {
49 | for (const layer of layerSet) {
50 | state.set(layer.id, layer.visible);
51 | if (layer.layers && layer.layers.length > 0) {
52 | capture(layer.layers);
53 | }
54 | }
55 | };
56 |
57 | capture(layers);
58 | return state;
59 | };
60 |
61 | // Function to restore visibility state
62 | const _restoreVisibilityState = async (state) => {
63 | const restore = (layerSet) => {
64 | for (const layer of layerSet) {
65 | if (state.has(layer.id)) {
66 | layer.visible = state.get(layer.id);
67 | }
68 |
69 | if (layer.layers && layer.layers.length > 0) {
70 | restore(layer.layers);
71 | }
72 | }
73 | };
74 |
75 | await execute(async () => {
76 | restore(app.activeDocument.layers);
77 | });
78 | };
79 |
80 | const exportLayersAsPng = async (command) => {
81 | let options = command.options;
82 | let layersInfo = options.layersInfo;
83 |
84 | const results = [];
85 |
86 |
87 | let originalState;
88 | await execute(async () => {
89 | originalState = _captureVisibilityState(app.activeDocument.layers);
90 | setVisibleAllLayers(false);
91 | });
92 |
93 | for (const info of layersInfo) {
94 | let result = {};
95 |
96 | let layer = findLayer(info.layerId);
97 |
98 | try {
99 | if (!layer) {
100 | throw new Error(
101 |                     `exportLayersAsPng: Could not find layer with ID: [${info.layerId}]`
102 | );
103 | }
104 | await execute(async () => {
105 | layer.visible = true;
106 | });
107 |
108 | let tmp = await _saveDocumentAs(info.filePath, "PNG");
109 |
110 | result = {
111 | ...tmp,
112 | layerId: info.layerId,
113 | success: true
114 | };
115 |
116 | } catch (e) {
117 | result = {
118 | ...info,
119 | success: false,
120 | message: e.message
121 | };
122 | } finally {
123 | if (layer) {
124 | await execute(async () => {
125 | layer.visible = false;
126 | });
127 | }
128 | }
129 |
130 | results.push(result);
131 | }
132 |
133 | await execute(async () => {
134 | await _restoreVisibilityState(originalState);
135 | })
136 |
137 | return results;
138 | };
139 |
140 | const scaleLayer = async (command) => {
141 | let options = command.options;
142 |
143 | let layerId = options.layerId;
144 | let layer = findLayer(layerId);
145 |
146 | if (!layer) {
147 | throw new Error(
148 | `scaleLayer : Could not find layer with ID : [${layerId}]`
149 | );
150 | }
151 |
152 | await execute(async () => {
153 | let anchor = getAnchorPosition(options.anchorPosition);
154 | let interpolation = getInterpolationMethod(options.interpolationMethod);
155 |
156 | await layer.scale(options.width, options.height, anchor, {
157 | interpolation: interpolation,
158 | });
159 | });
160 | };
161 |
162 | const rotateLayer = async (command) => {
163 | let options = command.options;
164 |
165 | let layerId = options.layerId;
166 | let layer = findLayer(layerId);
167 |
168 | if (!layer) {
169 | throw new Error(
170 | `rotateLayer : Could not find layer with ID : [${layerId}]`
171 | );
172 | }
173 |
174 | await execute(async () => {
175 | selectLayer(layer, true);
176 |
177 | let anchor = getAnchorPosition(options.anchorPosition);
178 | let interpolation = getInterpolationMethod(options.interpolationMethod);
179 |
180 | await layer.rotate(options.angle, anchor, {
181 | interpolation: interpolation,
182 | });
183 | });
184 | };
185 |
186 | const flipLayer = async (command) => {
187 | let options = command.options;
188 |
189 | let layerId = options.layerId;
190 | let layer = findLayer(layerId);
191 |
192 | if (!layer) {
193 | throw new Error(
194 | `flipLayer : Could not find layer with ID : [${layerId}]`
195 | );
196 | }
197 |
198 | await execute(async () => {
199 | await layer.flip(options.axis);
200 | });
201 | };
202 |
203 | const deleteLayer = async (command) => {
204 | let options = command.options;
205 |
206 | let layerId = options.layerId;
207 | let layer = findLayer(layerId);
208 |
209 | if (!layer) {
210 | throw new Error(
211 |             `deleteLayer : Could not find layer with ID : [${layerId}]`
212 | );
213 | }
214 |
215 | await execute(async () => {
216 | layer.delete();
217 | });
218 | };
219 |
220 | const renameLayer = async (command) => {
221 | let options = command.options;
222 |
223 | let layerId = options.layerId;
224 | let newLayerName = options.newLayerName;
225 |
226 | await _renameLayer(layerId, newLayerName)
227 | };
228 |
229 | const _renameLayer = async (layerId, layerName) => {
230 |
231 | let layer = findLayer(layerId);
232 |
233 | if (!layer) {
234 | throw new Error(
235 | `_renameLayer : Could not find layer with ID : [${layerId}]`
236 | );
237 | }
238 |
239 | await execute(async () => {
240 | layer.name = layerName;
241 | });
242 | }
243 |
244 | const renameLayers = async (command) => {
245 | let options = command.options;
246 |
247 | let data = options.layerData;
248 |
249 | for(const d of data) {
250 | await _renameLayer(d.layer_id, d.new_layer_name)
251 | }
252 | };
253 |
254 | const groupLayers = async (command) => {
255 | let options = command.options;
256 | const layerIds = options.layerIds;
257 |
258 | let layers = [];
259 |
260 | for (const layerId of layerIds) {
261 |
262 | let layer = findLayer(layerId);
263 |
264 | if (!layer) {
265 | throw new Error(
266 | `groupLayers : Could not find layerId : ${layerId}`
267 | );
268 | }
269 |
270 | layers.push(layer);
271 | }
272 |
273 | await execute(async () => {
274 | await app.activeDocument.createLayerGroup({
275 | name: options.groupName,
276 | fromLayers: layers,
277 | });
278 | });
279 | };
280 |
281 | const setLayerVisibility = async (command) => {
282 | let options = command.options;
283 |
284 | let layerId = options.layerId;
285 | let layer = findLayer(layerId);
286 |
287 | if (!layer) {
288 | throw new Error(
289 | `setLayerVisibility : Could not find layer with ID : [${layerId}]`
290 | );
291 | }
292 |
293 | await execute(async () => {
294 | layer.visible = options.visible;
295 | });
296 | };
297 |
298 | const translateLayer = async (command) => {
299 | let options = command.options;
300 |
301 | let layerId = options.layerId;
302 | let layer = findLayer(layerId);
303 |
304 | if (!layer) {
305 | throw new Error(
306 | `translateLayer : Could not find layer with ID : [${layerId}]`
307 | );
308 | }
309 |
310 | await execute(async () => {
311 | await layer.translate(options.xOffset, options.yOffset);
312 | });
313 | };
314 |
315 | const setLayerProperties = async (command) => {
316 | let options = command.options;
317 |
318 | let layerId = options.layerId;
319 | let layer = findLayer(layerId);
320 |
321 | if (!layer) {
322 | throw new Error(
323 | `setLayerProperties : Could not find layer with ID : [${layerId}]`
324 | );
325 | }
326 |
327 | await execute(async () => {
328 | layer.blendMode = getBlendMode(options.blendMode);
329 | layer.opacity = options.layerOpacity;
330 | layer.fillOpacity = options.fillOpacity;
331 |
332 | if (layer.isClippingMask != options.isClippingMask) {
333 | selectLayer(layer, true);
334 | let command = options.isClippingMask
335 | ? {
336 | _obj: "groupEvent",
337 | _target: [
338 | {
339 | _enum: "ordinal",
340 | _ref: "layer",
341 | _value: "targetEnum",
342 | },
343 | ],
344 | }
345 | : {
346 | _obj: "ungroup",
347 | _target: [
348 | {
349 | _enum: "ordinal",
350 | _ref: "layer",
351 | _value: "targetEnum",
352 | },
353 | ],
354 | };
355 |
356 | await action.batchPlay([command], {});
357 | }
358 | });
359 | };
360 |
361 | const duplicateLayer = async (command) => {
362 | let options = command.options;
363 |
364 | await execute(async () => {
365 | let layer = findLayer(options.sourceLayerId);
366 |
367 | if (!layer) {
368 | throw new Error(
369 | `duplicateLayer : Could not find sourceLayerId : ${options.sourceLayerId}`
370 | );
371 | }
372 |
373 | let d = await layer.duplicate();
374 | d.name = options.duplicateLayerName;
375 | });
376 | };
377 |
378 | const flattenAllLayers = async (command) => {
379 | const options = command.options;
380 | const layerName = options.layerName
381 |
382 | await execute(async () => {
383 | await app.activeDocument.flatten();
384 |
385 | let layers = app.activeDocument.layers;
386 |
387 | if (layers.length != 1) {
388 | throw new Error(`flattenAllLayers : Unknown error`);
389 | }
390 |
391 | let l = layers[0];
392 | l.allLocked = false;
393 | l.name = layerName;
394 | });
395 | };
396 |
397 | const getLayerBounds = async (command) => {
398 | let options = command.options;
399 | let layerId = options.layerId;
400 |
401 | let layer = findLayer(layerId);
402 |
403 | if (!layer) {
404 | throw new Error(
405 | `getLayerBounds : Could not find layerId : ${layerId}`
406 | );
407 | }
408 |
409 | let b = layer.bounds;
410 | return { left: b.left, top: b.top, bottom: b.bottom, right: b.right };
411 | };
412 |
413 | const rasterizeLayer = async (command) => {
414 | let options = command.options;
415 | let layerId = options.layerId;
416 |
417 | let layer = findLayer(layerId);
418 |
419 | if (!layer) {
420 | throw new Error(
421 | `rasterizeLayer : Could not find layerId : ${layerId}`
422 | );
423 | }
424 |
425 | await execute(async () => {
426 | layer.rasterize(constants.RasterizeType.ENTIRELAYER);
427 | });
428 | };
429 |
430 | const editTextLayer = async (command) => {
431 | let options = command.options;
432 |
433 | let layerId = options.layerId;
434 | let layer = findLayer(layerId);
435 |
436 | if (!layer) {
437 | throw new Error(`editTextLayer : Could not find layerId : ${layerId}`);
438 | }
439 |
440 | if (layer.kind.toUpperCase() != constants.LayerKind.TEXT.toUpperCase()) {
441 | throw new Error(`editTextLayer : Layer type must be TEXT : ${layer.kind}`);
442 | }
443 |
444 | await execute(async () => {
445 | const contents = options.contents;
446 | const fontSize = options.fontSize;
447 | const textColor = options.textColor;
448 | const fontName = options.fontName;
449 |
450 |
451 | console.log("contents", options.contents)
452 | console.log("fontSize", options.fontSize)
453 | console.log("textColor", options.textColor)
454 | console.log("fontName", options.fontName)
455 |
456 | if (contents != undefined) {
457 | layer.textItem.contents = contents;
458 | }
459 |
460 | if (fontSize != undefined) {
461 | let s = convertFontSize(fontSize);
462 | layer.textItem.characterStyle.size = s;
463 | }
464 |
465 | if (textColor != undefined) {
466 | let c = parseColor(textColor);
467 | layer.textItem.characterStyle.color = c;
468 | }
469 |
470 | if (fontName != undefined) {
471 | layer.textItem.characterStyle.font = fontName;
472 | }
473 | });
474 | }
475 |
476 | const moveLayer = async (command) => {
477 | let options = command.options;
478 |
479 | let layerId = options.layerId;
480 | let layer = findLayer(layerId);
481 |
482 | if (!layer) {
483 | throw new Error(`moveLayer : Could not find layerId : ${layerId}`);
484 | }
485 |
486 | let position;
487 | switch (options.position) {
488 | case "TOP":
489 | position = "front";
490 | break;
491 | case "BOTTOM":
492 | position = "back";
493 | break;
494 | case "UP":
495 | position = "next";
496 | break;
497 | case "DOWN":
498 | position = "previous";
499 | break;
500 | default:
501 | throw new Error(
502 | `moveLayer: Unknown placement : ${options.position}`
503 | );
504 | }
505 |
506 | await execute(async () => {
507 | selectLayer(layer, true);
508 |
509 | let commands = [
510 | {
511 | _obj: "move",
512 | _target: [
513 | {
514 | _enum: "ordinal",
515 | _ref: "layer",
516 | _value: "targetEnum",
517 | },
518 | ],
519 | to: {
520 | _enum: "ordinal",
521 | _ref: "layer",
522 | _value: position,
523 | },
524 | },
525 | ];
526 |
527 | await action.batchPlay(commands, {});
528 | });
529 | };
530 |
531 | const createMultiLineTextLayer = async (command) => {
532 | let options = command.options;
533 |
534 | await execute(async () => {
535 | let c = parseColor(options.textColor);
536 |
537 | let fontSize = convertFontSize(options.fontSize);
538 |
539 | let contents = options.contents.replace(/\\n/g, "\n");
540 |
541 | let a = await app.activeDocument.createTextLayer({
542 | //blendMode: constants.BlendMode.DISSOLVE,//ignored
543 | textColor: c,
544 | //color:constants.LabelColors.BLUE,//ignored
545 | //opacity:50, //ignored
546 | //name: "layer name",//ignored
547 | contents: contents,
548 | fontSize: fontSize,
549 | fontName: options.fontName, //"ArialMT",
550 | position: options.position, //y is the baseline of the text. Not top left
551 | });
552 |
553 | //https://developer.adobe.com/photoshop/uxp/2022/ps_reference/classes/layer/
554 |
555 | a.blendMode = getBlendMode(options.blendMode);
556 | a.name = options.layerName;
557 | a.opacity = options.opacity;
558 |
559 | await a.textItem.convertToParagraphText();
560 | a.textItem.paragraphStyle.justification = getJustificationMode(
561 | options.justification
562 | );
563 |
564 | selectLayer(a, true);
565 | let commands = [
566 | // Set current text layer
567 | {
568 | _obj: "set",
569 | _target: [
570 | {
571 | _enum: "ordinal",
572 | _ref: "textLayer",
573 | _value: "targetEnum",
574 | },
575 | ],
576 | to: {
577 | _obj: "textLayer",
578 |
579 | textShape: [
580 | {
581 | _obj: "textShape",
582 | bounds: {
583 | _obj: "rectangle",
584 | bottom: options.bounds.bottom,
585 | left: options.bounds.left,
586 | right: options.bounds.right,
587 | top: options.bounds.top,
588 | },
589 | char: {
590 | _enum: "char",
591 | _value: "box",
592 | },
593 | columnCount: 1,
594 | columnGutter: {
595 | _unit: "pointsUnit",
596 | _value: 0.0,
597 | },
598 | firstBaselineMinimum: {
599 | _unit: "pointsUnit",
600 | _value: 0.0,
601 | },
602 | frameBaselineAlignment: {
603 | _enum: "frameBaselineAlignment",
604 | _value: "alignByAscent",
605 | },
606 | orientation: {
607 | _enum: "orientation",
608 | _value: "horizontal",
609 | },
610 | rowCount: 1,
611 | rowGutter: {
612 | _unit: "pointsUnit",
613 | _value: 0.0,
614 | },
615 | rowMajorOrder: true,
616 | spacing: {
617 | _unit: "pointsUnit",
618 | _value: 0.0,
619 | },
620 | transform: {
621 | _obj: "transform",
622 | tx: 0.0,
623 | ty: 0.0,
624 | xx: 1.0,
625 | xy: 0.0,
626 | yx: 0.0,
627 | yy: 1.0,
628 | },
629 | },
630 | ],
631 | },
632 | },
633 | ];
634 |
635 | a.textItem.contents = contents;
636 | await action.batchPlay(commands, {});
637 | });
638 | };
639 |
640 | const createSingleLineTextLayer = async (command) => {
641 | let options = command.options;
642 |
643 | await execute(async () => {
644 | let c = parseColor(options.textColor);
645 |
646 | let fontSize = convertFontSize(options.fontSize);
647 |
648 | let a = await app.activeDocument.createTextLayer({
649 | //blendMode: constants.BlendMode.DISSOLVE,//ignored
650 | textColor: c,
651 | //color:constants.LabelColors.BLUE,//ignored
652 | //opacity:50, //ignored
653 | //name: "layer name",//ignored
654 | contents: options.contents,
655 | fontSize: fontSize,
656 | fontName: options.fontName, //"ArialMT",
657 | position: options.position, //y is the baseline of the text. Not top left
658 | });
659 |
660 | //https://developer.adobe.com/photoshop/uxp/2022/ps_reference/classes/layer/
661 |
662 | a.blendMode = getBlendMode(options.blendMode);
663 | a.name = options.layerName;
664 | a.opacity = options.opacity;
665 | });
666 | };
667 |
668 | const createPixelLayer = async (command) => {
669 | let options = command.options;
670 |
671 | await execute(async () => {
672 | //let c = parseColor(options.textColor)
673 |
674 | let b = getBlendMode(options.blendMode);
675 |
676 | let a = await app.activeDocument.createPixelLayer({
677 | name: options.layerName,
678 | opacity: options.opacity,
679 | fillNeutral: options.fillNeutral,
680 | blendMode: b,
681 | });
682 | });
683 | };
684 |
685 |
686 | const getLayers = async (command) => {
687 | let out = await execute(async () => {
688 | let result = [];
689 |
690 | // Function to recursively process layers
691 | const processLayers = (layersList) => {
692 | let layersArray = [];
693 |
694 | for (let i = 0; i < layersList.length; i++) {
695 | let layer = layersList[i];
696 |
697 | let kind = layer.kind.toUpperCase()
698 |
699 | let layerInfo = {
700 | name: layer.name,
701 | type: kind,
702 | id: layer.id,
703 | isClippingMask: layer.isClippingMask,
704 | opacity: Math.round(layer.opacity),
705 | blendMode: layer.blendMode.toUpperCase(),
706 | };
707 |
708 | if (kind == constants.LayerKind.TEXT.toUpperCase()) {
709 |
710 | let _c = layer.textItem.characterStyle.color;
711 | let color = {
712 | red: Math.round(_c.rgb.red),
713 | green: Math.round(_c.rgb.green),
714 | blue: Math.round(_c.rgb.blue)
715 | }
716 |
717 | layerInfo.textInfo = {
718 | fontSize: convertFromPhotoshopFontSize(layer.textItem.characterStyle.size),
719 | fontName: layer.textItem.characterStyle.font,
720 | fontColor: color,
721 | text: layer.textItem.contents,
722 | isMultiLineText: layer.textItem.isParagraphText
723 | }
724 | }
725 |
726 |
727 | // Check if this layer has sublayers (is a group)
728 | if (layer.layers && layer.layers.length > 0) {
729 | layerInfo.layers = processLayers(layer.layers);
730 | }
731 |
732 | layersArray.push(layerInfo);
733 | }
734 |
735 | return layersArray;
736 | };
737 |
738 | // Start with the top-level layers
739 | result = processLayers(app.activeDocument.layers);
740 |
741 | return result;
742 | });
743 |
744 | return out;
745 | };
746 |
747 | const removeLayerMask = async (command) => {
748 | const options = command.options;
749 |
750 | const layerId = options.layerId;
751 | const layer = findLayer(layerId);
752 |
753 | if (!layer) {
754 | throw new Error(`removeLayerMask : Could not find layerId : ${layerId}`);
755 | }
756 |
757 | await execute(async () => {
758 | selectLayer(layer, true);
759 |
760 | let commands = [
761 | // Delete mask channel
762 | {
763 | _obj: "delete",
764 | _target: [
765 | {
766 | _enum: "channel",
767 | _ref: "channel",
768 | _value: "mask",
769 | },
770 | ],
771 | },
772 | ];
773 | await action.batchPlay(commands, {});
774 | });
775 | };
776 |
777 | const addLayerMask = async (command) => {
778 | if (!hasActiveSelection()) {
779 | throw new Error("addLayerMask : Requires an active selection.");
780 | }
781 |
782 | const options = command.options;
783 |
784 | const layerId = options.layerId;
785 | const layer = findLayer(layerId);
786 |
787 | if (!layer) {
788 | throw new Error(`addLayerMask : Could not find layerId : ${layerId}`);
789 | }
790 |
791 | await execute(async () => {
792 | selectLayer(layer, true);
793 |
794 | let commands = [
795 | // Make
796 | {
797 | _obj: "make",
798 | at: {
799 | _enum: "channel",
800 | _ref: "channel",
801 | _value: "mask",
802 | },
803 | new: {
804 | _class: "channel",
805 | },
806 | using: {
807 | _enum: "userMaskEnabled",
808 | _value: "revealSelection",
809 | },
810 | },
811 | ];
812 |
813 | await action.batchPlay(commands, {});
814 | });
815 | };
816 |
817 | const harmonizeLayer = async (command) => {
818 | const options = command.options;
819 |
820 | const layerId = options.layerId;
821 | const newLayerName = options.newLayerName;
822 | const rasterizeLayer = options.rasterizeLayer;
823 |
824 | const layer = findLayer(layerId);
825 |
826 | if (!layer) {
827 | throw new Error(`harmonizeLayer : Could not find layerId : ${layerId}`);
828 | }
829 |
830 | await execute(async () => {
831 | selectLayer(layer, true);
832 |
833 | let commands = [
834 | {
835 | "_obj": "syntheticGenHarmonize",
836 | "_target": [
837 | {
838 | "_enum": "ordinal",
839 | "_ref": "document",
840 | "_value": "targetEnum"
841 | }
842 | ],
843 | "documentID": 60,
844 | "layerID": 7,
845 | "prompt": "",
846 | "serviceID": "gen_harmonize",
847 | "serviceOptionsList": {
848 | "clio": {
849 | "_obj": "clio",
850 | "dualCrop": true,
851 | "gi_ADVANCED": "{\"enable_mts\":true}",
852 | "gi_CONTENT_PRESERVE": 0,
853 | "gi_CROP": false,
854 | "gi_DILATE": false,
855 | "gi_ENABLE_PROMPT_FILTER": true,
856 | "gi_GUIDANCE": 6,
857 | "gi_MODE": "ginp",
858 | "gi_NUM_STEPS": -1,
859 | "gi_PROMPT": "",
860 | "gi_SEED": -1,
861 | "gi_SIMILARITY": 0
862 | },
863 | "gen_harmonize": {
864 | "_obj": "gen_harmonize",
865 | "dualCrop": true,
866 | "gi_SEED": -1
867 | }
868 | },
869 | "workflow": "gen_harmonize",
870 | "workflowType": {
871 | "_enum": "genWorkflow",
872 | "_value": "gen_harmonize"
873 | },
874 | "workflow_to_active_service_identifier_map": {
875 | "gen_harmonize": "gen_harmonize",
876 | "generate_background": "clio3",
877 | "generate_similar": "clio3",
878 | "generativeUpscale": "fal_aura_sr",
879 | "in_painting": "gen_harmonize",
880 | "instruct_edit": "clio3",
881 | "out_painting": "clio3",
882 | "text_to_image": "clio3"
883 | }
884 | },
885 |
886 | ];
887 |
888 |
889 | console.log(rasterizeLayer)
890 | if(rasterizeLayer) {
891 | commands.push({
892 | _obj: "rasterizeLayer",
893 | _target: [
894 | {
895 | _enum: "ordinal",
896 | _ref: "layer",
897 | _value: "targetEnum",
898 | },
899 | ],
900 | })
901 | }
902 |
903 | let o = await action.batchPlay(commands, {});
904 | let layerId = o[0].layerID;
905 |
906 | let l = findLayer(layerId);
907 | l.name = newLayerName;
908 | });
909 | };
910 |
911 | const getLayerImage = async (command) => {
912 |
913 | const options = command.options;
914 | const layerId = options.layerId;
915 |
916 | const layer = findLayer(layerId);
917 |
918 | if (!layer) {
919 |         throw new Error(`getLayerImage : Could not find layerId : ${layerId}`);
920 | }
921 |
922 | let out = await execute(async () => {
923 |
924 | const pixelsOpt = {
925 | applyAlpha: true,
926 | layerID:layerId
927 | };
928 |
929 | const imgObj = await imaging.getPixels(pixelsOpt);
930 |
931 | const base64Data = await imaging.encodeImageData({
932 | imageData: imgObj.imageData,
933 | base64: true,
934 | });
935 |
936 | const result = {
937 | base64Image: base64Data,
938 | dataUrl: `data:image/jpeg;base64,${base64Data}`,
939 | width: imgObj.imageData.width,
940 | height: imgObj.imageData.height,
941 | colorSpace: imgObj.imageData.colorSpace,
942 | components: imgObj.imageData.components,
943 | format: "jpeg",
944 | };
945 |
946 | imgObj.imageData.dispose();
947 | return result;
948 | });
949 |
950 | return out;
951 | };
952 |
953 | const commandHandlers = {
954 | renameLayers,
955 | getLayerImage,
956 | harmonizeLayer,
957 | editTextLayer,
958 | exportLayersAsPng,
959 | removeLayerMask,
960 | addLayerMask,
961 | getLayers,
962 | scaleLayer,
963 | rotateLayer,
964 | flipLayer,
965 | deleteLayer,
966 | renameLayer,
967 | groupLayers,
968 | setLayerVisibility,
969 | translateLayer,
970 | setLayerProperties,
971 | duplicateLayer,
972 | flattenAllLayers,
973 | getLayerBounds,
974 | rasterizeLayer,
975 | moveLayer,
976 | createMultiLineTextLayer,
977 | createSingleLineTextLayer,
978 | createPixelLayer,
979 | };
980 |
981 | module.exports = {
982 | commandHandlers,
983 | };
984 |
```
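
The `commandHandlers` map above is how the plugin routes incoming command packets by name. On the Python side those packets are built with the `createCommand` / `sendCommand` helpers from `core.py` (the same pattern used by `pr-mcp.py` below). A minimal sketch of how an MCP tool could reach the `addLayerMask` handler; the tool name, the `"photoshop"` application id, and the proxy settings here are illustrative assumptions rather than code copied from `ps-mcp.py`:

```python
from mcp.server.fastmcp import FastMCP
from core import init, sendCommand, createCommand
import socket_client

mcp = FastMCP("Example Photoshop MCP Server", log_level="ERROR")

# Assumed proxy settings, mirroring the configuration pattern in pr-mcp.py
socket_client.configure(app="photoshop", url="http://localhost:3001", timeout=20)
init("photoshop", socket_client)

@mcp.tool()
def add_layer_mask(layer_id: int):
    """Adds a reveal-selection layer mask to the layer with the given id."""
    # "addLayerMask" and the layerId option match the handler defined above
    command = createCommand("addLayerMask", {"layerId": layer_id})
    return sendCommand(command)
```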
--------------------------------------------------------------------------------
/mcp/pr-mcp.py:
--------------------------------------------------------------------------------
```python
1 | # MIT License
2 | #
3 | # Copyright (c) 2025 Mike Chambers
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in all
13 | # copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | # SOFTWARE.
22 |
23 | from mcp.server.fastmcp import FastMCP, Image
24 | from PIL import Image as PILImage
25 |
26 | from core import init, sendCommand, createCommand
27 | import socket_client
28 | import sys
29 | import tempfile
30 | import os
31 | import io
32 |
33 |
34 | #logger.log(f"Python path: {sys.executable}")
35 | #logger.log(f"PYTHONPATH: {os.environ.get('PYTHONPATH')}")
36 | #logger.log(f"Current working directory: {os.getcwd()}")
37 | #logger.log(f"Sys.path: {sys.path}")
38 |
39 |
40 | mcp_name = "Adobe Premiere MCP Server"
41 | mcp = FastMCP(mcp_name, log_level="ERROR")
42 | print(f"{mcp_name} running on stdio", file=sys.stderr)
43 |
44 | APPLICATION = "premiere"
45 | PROXY_URL = 'http://localhost:3001'
46 | PROXY_TIMEOUT = 20
47 |
48 | socket_client.configure(
49 | app=APPLICATION,
50 | url=PROXY_URL,
51 | timeout=PROXY_TIMEOUT
52 | )
53 |
54 | init(APPLICATION, socket_client)
55 |
56 | @mcp.tool()
57 | def get_project_info():
58 | """
59 | Returns info on the currently active project in Premiere Pro.
60 | """
61 |
62 | command = createCommand("getProjectInfo", {
63 | })
64 |
65 | return sendCommand(command)
66 |
67 | @mcp.tool()
68 | def save_project():
69 | """
70 | Saves the active project in Premiere Pro.
71 | """
72 |
73 | command = createCommand("saveProject", {
74 | })
75 |
76 | return sendCommand(command)
77 |
78 | @mcp.tool()
79 | def save_project_as(file_path: str):
80 | """Saves the current Premiere project to the specified location.
81 |
82 | Args:
83 | file_path (str): The absolute path (including filename) where the file will be saved.
84 | Example: "/Users/username/Documents/project.prproj"
85 |
86 | """
87 |
88 | command = createCommand("saveProjectAs", {
89 | "filePath":file_path
90 | })
91 |
92 | return sendCommand(command)
93 |
94 | @mcp.tool()
95 | def open_project(file_path: str):
96 | """Opens the Premiere project at the specified path.
97 |
98 | Args:
99 | file_path (str): The absolute path (including filename) of the Premiere Pro project to open.
100 | Example: "/Users/username/Documents/project.prproj"
101 |
102 | """
103 |
104 | command = createCommand("openProject", {
105 | "filePath":file_path
106 | })
107 |
108 | return sendCommand(command)
109 |
110 |
111 | @mcp.tool()
112 | def create_project(directory_path: str, project_name: str):
113 | """
114 | Create a new Premiere project.
115 |
116 | Creates a new Adobe Premiere project file, saves it to the specified location and then opens it in Premiere.
117 |
118 | The function initializes an empty project with default settings.
119 |
120 | Args:
121 | directory_path (str): The full path to the directory where the project file will be saved. This directory must exist before calling the function.
122 | project_name (str): The name to be given to the project file. The '.prproj' extension will be added.
123 | """
124 |
125 | command = createCommand("createProject", {
126 | "path":directory_path,
127 | "name":project_name
128 | })
129 |
130 | return sendCommand(command)
131 |
132 |
133 | @mcp.tool()
134 | def create_bin_in_active_project(bin_name:str):
135 | """
136 | Creates a new bin / folder in the root project.
137 |
138 | Args:
139 |         bin_name (str): The name of the bin to be created.
140 |
141 |
142 | """
143 |
144 | command = createCommand("createBinInActiveProject", {
145 | "binName": bin_name
146 | })
147 |
148 | return sendCommand(command)
149 |
150 | @mcp.tool()
151 | def export_sequence(sequence_id: str, output_path: str, preset_path: str):
152 | """
153 | Exports a Premiere Pro sequence to a video file using specified export settings.
154 |
155 | This function renders and exports the specified sequence from the active Premiere Pro project
156 | to a video file on the file system. The export process uses a preset file to determine
157 | encoding settings, resolution, format, and other export parameters.
158 |
159 | Args:
160 | sequence_id (str): The unique identifier of the sequence to export.
161 | This should be the ID of an existing sequence in the current Premiere Pro project.
162 |
163 | output_path (str): The complete file system path where the exported video will be saved.
164 | Must include the full directory path, filename, and appropriate file extension.
165 |
166 | preset_path (str): The file system path to the export preset file (.epr) that defines the export settings including codec, resolution, bitrate, and format.
167 |
168 | IMPORTANT: The export may take an extended period of time, so if the call times out, it most likely means the export is still in progress.
169 | """
170 | command = createCommand("exportSequence", {
171 | "sequenceId": sequence_id,
172 | "outputPath": output_path,
173 | "presetPath": preset_path
174 | })
175 |
176 | return sendCommand(command)
177 |
178 | @mcp.tool()
179 | def move_project_items_to_bin(item_names: list[str], bin_name: str):
180 | """
181 | Moves specified project items to an existing bin/folder in the project.
182 |
183 | Args:
184 | item_names (list[str]): A list of names of project items to move to the specified bin.
185 | These should be the exact names of items as they appear in the project.
186 | bin_name (str): The name of the existing bin to move the project items to.
187 | The bin must already exist in the project.
188 |
189 | Returns:
190 | dict: Response from the Premiere Pro operation indicating success status.
191 |
192 | Raises:
193 | RuntimeError: If the bin doesn't exist, items don't exist, or the operation fails.
194 |
195 | Example:
196 | move_project_items_to_bin(
197 | item_names=["video1.mp4", "audio1.wav", "image1.png"],
198 | bin_name="Media Assets"
199 | )
200 | """
201 | command = createCommand("moveProjectItemsToBin", {
202 | "itemNames": item_names,
203 | "binName": bin_name
204 | })
205 |
206 | return sendCommand(command)
207 |
208 | @mcp.tool()
209 | def set_audio_track_mute(sequence_id:str, audio_track_index: int, mute: bool):
210 | """
211 | Sets the mute property on the specified audio track. If mute is true, all clips on the track will be muted and not played.
212 |
213 | Args:
214 | sequence_id (str) : The id of the sequence on which to set the audio track mute.
215 | audio_track_index (int): The index of the audio track to mute or unmute. Indices start at 0 for the first audio track.
216 | mute (bool): Whether the track should be muted.
217 | - True: Mutes the track (audio will not be played)
218 | - False: Unmutes the track (audio will be played normally)
219 |
220 | """
221 |
222 | command = createCommand("setAudioTrackMute", {
223 | "sequenceId": sequence_id,
224 | "audioTrackIndex":audio_track_index,
225 | "mute":mute
226 | })
227 |
228 | return sendCommand(command)
229 |
230 |
231 | @mcp.tool()
232 | def set_active_sequence(sequence_id: str):
233 | """
234 | Sets the sequence with the specified id as the active sequence within Premiere Pro (currently selected and visible in timeline)
235 |
236 | Args:
237 | sequence_id (str): ID for the sequence to be set as active
238 | """
239 |
240 | command = createCommand("setActiveSequence", {
241 | "sequenceId":sequence_id
242 | })
243 |
244 | return sendCommand(command)
245 |
246 |
247 | @mcp.tool()
248 | def create_sequence_from_media(item_names: list[str], sequence_name: str = "default"):
249 | """
250 | Creates a new sequence from the specified project items, placing clips on the timeline in the order they are provided.
251 |
252 | If there is not an active sequence the newly created sequence will be set as the active sequence when created.
253 |
254 | Args:
255 | item_names (list[str]): A list of project item names to include in the sequence in the desired order.
256 | sequence_name (str, optional): The name to give the new sequence. Defaults to "default".
257 | """
258 |
259 |
260 | command = createCommand("createSequenceFromMedia", {
261 | "itemNames":item_names,
262 | "sequenceName":sequence_name
263 | })
264 |
265 | return sendCommand(command)
266 |
267 | @mcp.tool()
268 | def close_gaps_on_sequence(sequence_id: str, track_index: int, track_type: str):
269 | """
270 | Closes gaps on the specified track(s) in a sequence's timeline.
271 |
272 | This function removes empty spaces (gaps) between clips on the timeline by moving
273 | clips leftward to fill any empty areas. This is useful for cleaning up the timeline
274 | after removing clips or when clips have been moved leaving gaps.
275 |
276 | Args:
277 | sequence_id (str): The ID of the sequence to close gaps on.
278 | track_index (int): The index of the track to close gaps on.
279 | Track indices start at 0 for the first track and increment upward.
280 | For video tracks, this refers to video track indices.
281 | For audio tracks, this refers to audio track indices.
282 | track_type (str): Specifies which type of tracks to close gaps on.
283 | Valid values:
284 | - "VIDEO": Close gaps only on the specified video track
285 | - "AUDIO": Close gaps only on the specified audio track
286 |
287 | """
288 |
289 | command = createCommand("closeGapsOnSequence", {
290 | "sequenceId": sequence_id,
291 | "trackIndex": track_index,
292 | "trackType": track_type,
293 | })
294 |
295 | return sendCommand(command)
296 |
297 |
298 | @mcp.tool()
299 | def remove_item_from_sequence(sequence_id: str, track_index:int, track_item_index: int, track_type:str, ripple_delete:bool=True):
300 | """
301 | Removes a specified media item from the sequence's timeline.
302 |
303 | Args:
304 | sequence_id (str): The id for the sequence to remove the media from
305 | track_index (int): The index of the track containing the target clip.
306 | Track indices start at 0 for the first track and increment upward.
307 | track_item_index (int): The index of the clip within the track to remove.
308 | Clip indices start at 0 for the first clip in the track and increment from left to right.
309 |         track_type (str): Specifies the type of track the clip is being removed from.
310 | Valid values:
311 |                          - "VIDEO": Remove the clip from the specified video track
312 |                          - "AUDIO": Remove the clip from the specified audio track
313 | ripple_delete (bool, optional): Whether to perform a ripple delete operation. Defaults to True.
314 | - True: Removes the clip and shifts all subsequent clips leftward to close the gap
315 | - False: Removes the clip but leaves a gap in the timeline where the clip was located
316 | """
317 |
318 | command = createCommand("removeItemFromSequence", {
319 | "sequenceId": sequence_id,
320 | "trackItemIndex":track_item_index,
321 | "trackIndex":track_index,
322 | "trackType":track_type,
323 | "rippleDelete":ripple_delete
324 | })
325 |
326 | return sendCommand(command)
327 |
328 | @mcp.tool()
329 | def add_marker_to_sequence(sequence_id: str,
330 | marker_name: str,
331 | start_time_ticks: int,
332 | duration_ticks: int,
333 | comments: str,
334 | marker_type: str = "Comment"):
335 | """
336 | Adds a marker to the specified sequence.
337 |
338 | Args:
339 | sequence_id (str):
340 | The ID of the sequence to which the marker will be added.
341 |
342 | marker_name (str):
343 | The name/title of the marker.
344 |
345 | start_time_ticks (int):
346 | The timeline position where the marker starts, in ticks.
347 |             (1 tick = 1/254016000000 of a second)
348 |
349 | duration_ticks (int):
350 | The length of the marker in ticks.
351 |
352 | comments (str):
353 | Optional text comment to store in the marker.
354 |
355 | marker_type (str, optional):
356 | The type of marker to add. Defaults to "Comment".
357 |
358 | Supported marker types include:
359 | - "Comment" → General-purpose note marker.
360 |
361 | """
362 |
363 | command = createCommand("addMarkerToSequence", {
364 | "sequenceId": sequence_id,
365 | "markerName": marker_name,
366 | "startTimeTicks": start_time_ticks,
367 | "durationTicks": duration_ticks,
368 | "comments": comments,
369 | "markerType": marker_type
370 | })
371 |
372 | return sendCommand(command)
373 |
374 |
375 |
376 | @mcp.tool()
377 | def add_media_to_sequence(sequence_id:str, item_name: str, video_track_index: int, audio_track_index: int, insertion_time_ticks: int = 0, overwrite: bool = True):
378 | """
379 | Adds a specified media item to the active sequence's timeline.
380 |
381 | Args:
382 | sequence_id (str) : The id for the sequence to add the media to
383 | item_name (str): The name or identifier of the media item to add.
384 | video_track_index (int): The index of the video track where the item should be inserted.
385 | audio_track_index (int): The index of the audio track where the item should be inserted.
386 | insertion_time_ticks (int): The position on the timeline in ticks, with 0 being the beginning. The API will return positions of existing clips in ticks
387 |         overwrite (bool, optional): Whether to overwrite existing content at the insertion point. Defaults to True. If False, any existing clips that overlap will be split and the new item inserted.
388 | """
389 |
390 |
391 | command = createCommand("addMediaToSequence", {
392 | "sequenceId": sequence_id,
393 | "itemName":item_name,
394 | "videoTrackIndex":video_track_index,
395 | "audioTrackIndex":audio_track_index,
396 | "insertionTimeTicks":insertion_time_ticks,
397 | "overwrite":overwrite
398 | })
399 |
400 | return sendCommand(command)
401 |
402 |
403 | @mcp.tool()
404 | def set_clip_disabled(sequence_id:str, track_index: int, track_item_index: int, track_type:str, disabled: bool):
405 | """
406 | Enables or disables a clip in the timeline.
407 |
408 | Args:
409 | sequence_id (str): The id for the sequence to set the clip disabled property.
410 | track_index (int): The index of the track containing the target clip.
411 | Track indices start at 0 for the first track and increment upward.
412 | For video tracks, this refers to video track indices.
413 | For audio tracks, this refers to audio track indices.
414 | track_item_index (int): The index of the clip within the track to enable/disable.
415 | Clip indices start at 0 for the first clip in the track and increment from left to right.
416 | track_type (str): Specifies which type of track to modify.
417 | Valid values:
418 | - "VIDEO": Modify clips on the specified video track
419 | - "AUDIO": Modify clips on the specified audio track
420 | disabled (bool): Whether to disable the clip.
421 | - True: Disables the clip (clip will not be visible during playback or export)
422 | - False: Enables the clip (normal visibility)
423 | """
424 |
425 | command = createCommand("setClipDisabled", {
426 | "sequenceId": sequence_id,
427 | "trackIndex":track_index,
428 | "trackItemIndex":track_item_index,
429 | "trackType":track_type,
430 | "disabled":disabled
431 | })
432 |
433 | return sendCommand(command)
434 |
435 |
436 | @mcp.tool()
437 | def set_clip_start_end_times(
438 | sequence_id: str, track_index: int, track_item_index: int, start_time_ticks: int,
439 | end_time_ticks: int, track_type: str):
440 | """
441 | Sets the start and end time boundaries for a specified clip in the timeline.
442 |
443 | This function allows you to modify the duration and timing of video clips, audio clips,
444 | and images that are already placed in the timeline by adjusting their in and out points.
445 | The clip can be trimmed to a shorter duration or extended to a longer duration.
446 |
447 | Args:
448 | sequence_id (str): The id for the sequence containing the clip to modify.
449 | track_index (int): The index of the track containing the target clip.
450 | Track indices start at 0 for the first track and increment upward.
451 | For video tracks, this refers to video track indices.
452 | For audio tracks, this refers to audio track indices.
453 | track_item_index (int): The index of the clip within the track to modify.
454 | Clip indices start at 0 for the first clip in the track and increment from left to right.
455 | start_time_ticks (int): The new start time for the clip in ticks.
456 | end_time_ticks (int): The new end time for the clip in ticks.
457 | track_type (str): Specifies which type of tracks to modify clips on.
458 | Valid values:
459 | - "VIDEO": Modify clips only on the specified video track
460 | - "AUDIO": Modify clips only on the specified audio track
461 |
462 | Note:
463 | - To trim a clip: Set start/end times within the original clip's duration
464 | - To extend a clip: Set end time beyond the original clip's duration
465 | - Works with video clips, audio clips, and image files (like PSD files)
466 | - Times are specified in ticks (Premiere Pro's internal time unit)
467 | """
468 |
469 | command = createCommand("setClipStartEndTimes", {
470 | "sequenceId": sequence_id,
471 | "trackIndex": track_index,
472 | "trackItemIndex": track_item_index,
473 | "startTimeTicks": start_time_ticks,
474 | "endTimeTicks": end_time_ticks,
475 | "trackType": track_type
476 | })
477 |
478 | return sendCommand(command)
479 |
480 | @mcp.tool()
481 | def add_black_and_white_effect(sequence_id:str, video_track_index: int, track_item_index: int):
482 | """
483 | Adds a black and white effect to a clip at the specified track and position.
484 |
485 | Args:
486 | sequence_id (str) : The id for the sequence to add the effect to
487 | video_track_index (int): The index of the video track containing the target clip.
488 | Track indices start at 0 for the first video track and increment upward.
489 | track_item_index (int): The index of the clip within the track to apply the effect to.
490 | Clip indices start at 0 for the first clip in the track and increment from left to right.
491 | """
492 |
493 | command = createCommand("appendVideoFilter", {
494 | "sequenceId": sequence_id,
495 | "videoTrackIndex":video_track_index,
496 | "trackItemIndex":track_item_index,
497 | "effectName":"AE.ADBE Black & White",
498 | "properties":[
499 | ]
500 | })
501 |
502 | return sendCommand(command)
503 |
504 | @mcp.tool()
505 | def get_sequence_frame_image(sequence_id: str, seconds: int):
506 |     """Returns a JPEG of the frame at the specified timestamp in the specified sequence in Premiere Pro, as an MCP Image object that can be displayed."""
507 |
508 | temp_dir = tempfile.gettempdir()
509 | file_path = os.path.join(temp_dir, f"frame_{sequence_id}_{seconds}.png")
510 |
511 | command = createCommand("exportFrame", {
512 | "sequenceId": sequence_id,
513 | "filePath": file_path,
514 | "seconds": seconds
515 | })
516 |
517 | result = sendCommand(command)
518 |
519 | if not result.get("status") == "SUCCESS":
520 | return result
521 |
522 | file_path = result["response"]["filePath"]
523 |
524 | with open(file_path, 'rb') as f:
525 | png_image = PILImage.open(f)
526 |
527 | # Convert to RGB if necessary (removes alpha channel)
528 | if png_image.mode in ("RGBA", "LA", "P"):
529 | rgb_image = PILImage.new("RGB", png_image.size, (255, 255, 255))
530 | rgb_image.paste(png_image, mask=png_image.split()[-1] if png_image.mode == "RGBA" else None)
531 | png_image = rgb_image
532 |
533 | # Save as JPEG to bytes buffer
534 | jpeg_buffer = io.BytesIO()
535 | png_image.save(jpeg_buffer, format="JPEG", quality=85, optimize=True)
536 | jpeg_bytes = jpeg_buffer.getvalue()
537 |
538 | image = Image(data=jpeg_bytes, format="jpeg")
539 |
540 | del result["response"]
541 |
542 | try:
543 | os.remove(file_path)
544 | except FileNotFoundError:
545 | pass
546 |
547 | return [result, image]
548 |
549 | @mcp.tool()
550 | def export_frame(sequence_id:str, file_path: str, seconds: int):
551 | """Captures a specific frame from the sequence at the given timestamp
552 | and exports it as a PNG or JPG (depending on file extension) image file to the specified path.
553 |
554 | Args:
555 | sequence_id (str) : The id for the sequence to export the frame from
556 | file_path (str): The destination path where the exported PNG / JPG image will be saved.
557 | Must include the full directory path and filename with .png or .jpg extension.
558 | seconds (int): The timestamp in seconds from the beginning of the sequence
559 | where the frame should be captured. The frame closest to this time position
560 | will be extracted.
561 | """
562 |
563 | command = createCommand("exportFrame", {
564 | "sequenceId": sequence_id,
565 | "filePath": file_path,
566 | "seconds":seconds
567 | }
568 | )
569 |
570 | return sendCommand(command)
571 |
572 |
573 | @mcp.tool()
574 | def add_gaussian_blur_effect(sequence_id: str, video_track_index: int, track_item_index: int, blurriness: float, blur_dimensions: str = "HORIZONTAL_VERTICAL"):
575 | """
576 | Adds a gaussian blur effect to a clip at the specified track and position.
577 |
578 | Args:
579 | sequence_id (str) : The id for the sequence to add the effect to
580 | video_track_index (int): The index of the video track containing the target clip.
581 | Track indices start at 0 for the first video track and increment upward.
582 |
583 | track_item_index (int): The index of the clip within the track to apply the effect to.
584 | Clip indices start at 0 for the first clip in the track and increment from left to right.
585 |
586 | blurriness (float): The intensity of the blur effect. Higher values create stronger blur.
587 | Recommended range is between 0.0 and 100.0 (Max 3000).
588 |
589 | blur_dimensions (str, optional): The direction of the blur effect. Defaults to "HORIZONTAL_VERTICAL".
590 | Valid options are:
591 | - "HORIZONTAL_VERTICAL": Blur in all directions
592 | - "HORIZONTAL": Blur only horizontally
593 | - "VERTICAL": Blur only vertically
594 | """
595 | dimensions = {"HORIZONTAL_VERTICAL": 0, "HORIZONTAL": 1, "VERTICAL": 2}
596 |
597 | # Validate blur_dimensions parameter
598 | if blur_dimensions not in dimensions:
599 |         raise ValueError(f"Invalid blur_dimensions '{blur_dimensions}'. Valid values: {', '.join(dimensions.keys())}")
600 |
601 | command = createCommand("appendVideoFilter", {
602 | "sequenceId": sequence_id,
603 | "videoTrackIndex": video_track_index,
604 | "trackItemIndex": track_item_index,
605 | "effectName": "AE.ADBE Gaussian Blur 2",
606 | "properties": [
607 | {"name": "Blur Dimensions", "value": dimensions[blur_dimensions]},
608 | {"name": "Blurriness", "value": blurriness}
609 | ]
610 | })
611 |
612 | return sendCommand(command)
613 |
614 | def rgb_to_premiere_color3(rgb_color, alpha=1.0):
615 | """Converts RGB (0–255) dict to Premiere Pro color format [r, g, b, a] with floats (0.0–1.0)."""
616 | return [
617 | rgb_color["red"] / 255.0,
618 | rgb_color["green"] / 255.0,
619 | rgb_color["blue"] / 255.0,
620 | alpha
621 | ]
622 |
623 | def rgb_to_premiere_color(rgb_color, alpha=255):
624 | """
625 | Converts an RGB(A) dict (0–255) to a 64-bit Premiere Pro color parameter (as int).
626 | Matches Adobe's internal ARGB 16-bit fixed-point format.
627 | """
628 | def to16bit(value):
629 | return int(round(value * 256))
630 |
631 | r16 = to16bit(rgb_color["red"] / 255.0)
632 | g16 = to16bit(rgb_color["green"] / 255.0)
633 | b16 = to16bit(rgb_color["blue"] / 255.0)
634 | a16 = to16bit(alpha / 255.0)
635 |
636 | high = (a16 << 16) | r16 # top 32 bits: A | R
637 | low = (g16 << 16) | b16 # bottom 32 bits: G | B
638 |
639 | packed_color = (high << 32) | low
640 | return packed_color
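# Worked example for the packing above: a fully opaque pure red,
#   {"red": 255, "green": 0, "blue": 0}, gives r16 = a16 = round(1.0 * 256) = 256 and g16 = b16 = 0,
#   so high = (256 << 16) | 256 = 16777472 and low = (0 << 16) | 0 = 0,
#   and the packed value is 16777472 << 32 = 72058693549555712.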
641 |
642 |
643 |
644 | @mcp.tool()
645 | def add_tint_effect(sequence_id: str, video_track_index: int, track_item_index: int, black_map:dict = {"red":0, "green":0, "blue":0}, white_map:dict = {"red":255, "green":255, "blue":255}, amount:int = 100):
646 | """
647 | Adds the tint effect to a clip at the specified track and position.
648 |
649 | This function applies a tint effect that maps the dark and light areas of the clip to specified colors.
650 |
651 | Args:
652 | sequence_id (str) : The id for the sequence to add the effect to
653 | video_track_index (int): The index of the video track containing the target clip.
654 | Track indices start at 0 for the first video track and increment upward.
655 |
656 | track_item_index (int): The index of the clip within the track to apply the effect to.
657 | Clip indices start at 0 for the first clip in the track and increment from left to right.
658 |
659 | black_map (dict): The RGB color values to map black/dark areas to, with keys "red", "green", and "blue".
660 | Default is {"red":0, "green":0, "blue":0} (pure black).
661 |
662 | white_map (dict): The RGB color values to map white/light areas to, with keys "red", "green", and "blue".
663 | Default is {"red":255, "green":255, "blue":255} (pure white).
664 |
665 | amount (int): The intensity of the tint effect as a percentage, ranging from 0 to 100.
666 | Default is 100 (full tint effect).
667 | """
668 |
669 | command = createCommand("appendVideoFilter", {
670 | "sequenceId": sequence_id,
671 | "videoTrackIndex":video_track_index,
672 | "trackItemIndex":track_item_index,
673 | "effectName":"AE.ADBE Tint",
674 | "properties":[
675 | #{"name":"Map White To", "value":rgb_to_premiere_color(white_map)},
676 | #{"name":"Map Black To", "value":rgb_to_premiere_color(black_map)}
677 | {"name":"Map Black To", "value":rgb_to_premiere_color(black_map)}
678 | #{"name":"Amount to Tint", "value":amount / 100}
679 | ]
680 | })
681 |
682 | return sendCommand(command)
683 |
684 |
685 |
686 | @mcp.tool()
687 | def add_motion_blur_effect(sequence_id: str, video_track_index: int, track_item_index: int, direction: int, length: int):
688 | """
689 | Adds the directional blur effect to a clip at the specified track and position.
690 |
691 | This function applies a motion blur effect that simulates movement in a specific direction.
692 |
693 | Args:
694 | sequence_id (str) : The id for the sequence to add the effect to
695 | video_track_index (int): The index of the video track containing the target clip.
696 | Track indices start at 0 for the first video track and increment upward.
697 |
698 | track_item_index (int): The index of the clip within the track to apply the effect to.
699 | Clip indices start at 0 for the first clip in the track and increment from left to right.
700 |
701 | direction (int): The angle of the directional blur in degrees, ranging from 0 to 360.
702 | - 0/360: Vertical blur upward
703 | - 90: Horizontal blur to the right
704 | - 180: Vertical blur downward
705 | - 270: Horizontal blur to the left
706 |
707 | length (int): The intensity or distance of the blur effect, ranging from 0 to 1000.
708 | """
709 |
710 | command = createCommand("appendVideoFilter", {
711 | "sequenceId": sequence_id,
712 | "videoTrackIndex":video_track_index,
713 | "trackItemIndex":track_item_index,
714 | "effectName":"AE.ADBE Motion Blur",
715 | "properties":[
716 | {"name":"Direction", "value":direction},
717 | {"name":"Blur Length", "value":length}
718 | ]
719 | })
720 |
721 | return sendCommand(command)
722 |
723 | @mcp.tool()
724 | def append_video_transition(sequence_id: str, video_track_index: int, track_item_index: int, transition_name: str, duration: float = 1.0, clip_alignment: float = 0.5):
725 | """
726 | Creates a transition between the specified clip and the adjacent clip on the timeline.
727 |
728 | In general, you should keep transitions short (no more than 2 seconds is a good rule).
729 |
730 | Args:
731 | sequence_id (str) : The id for the sequence to add the transition to
732 | video_track_index (int): The index of the video track containing the target clips.
733 | track_item_index (int): The index of the clip within the track to apply the transition to.
734 | transition_name (str): The name of the transition to apply. Must be a valid transition name (see below).
735 | duration (float): The duration of the transition in seconds.
736 | clip_alignment (float): Controls how the transition is distributed between the two clips.
737 | Range: 0.0 to 1.0, where:
738 | - 0.0 places transition entirely on the right (later) clip
739 | - 0.5 centers the transition equally between both clips (default)
740 | - 1.0 places transition entirely on the left (earlier) clip
741 |
742 | Valid Transition Names:
743 | Basic Transitions (ADBE):
744 | - "ADBE Additive Dissolve"
745 | - "ADBE Cross Zoom"
746 | - "ADBE Cube Spin"
747 | - "ADBE Film Dissolve"
748 | - "ADBE Flip Over"
749 | - "ADBE Gradient Wipe"
750 | - "ADBE Iris Cross"
751 | - "ADBE Iris Diamond"
752 | - "ADBE Iris Round"
753 | - "ADBE Iris Square"
754 | - "ADBE Page Peel"
755 | - "ADBE Push"
756 | - "ADBE Slide"
757 | - "ADBE Wipe"
758 |
759 | After Effects Transitions (AE.ADBE):
760 | - "AE.ADBE Center Split"
761 | - "AE.ADBE Inset"
762 | - "AE.ADBE Cross Dissolve New"
763 | - "AE.ADBE Dip To White"
764 | - "AE.ADBE Split"
765 | - "AE.ADBE Whip"
766 | - "AE.ADBE Non-Additive Dissolve"
767 | - "AE.ADBE Dip To Black"
768 | - "AE.ADBE Barn Doors"
769 | - "AE.ADBE MorphCut"
770 | """
771 |
772 | command = createCommand("appendVideoTransition", {
773 | "sequenceId": sequence_id,
774 | "videoTrackIndex":video_track_index,
775 | "trackItemIndex":track_item_index,
776 | "transitionName":transition_name,
777 | "clipAlignment":clip_alignment,
778 | "duration":duration
779 | })
780 |
781 | return sendCommand(command)
782 |
783 |
784 | @mcp.tool()
785 | def set_video_clip_properties(sequence_id: str, video_track_index: int, track_item_index: int, opacity: int = 100, blend_mode: str = "NORMAL"):
786 | """
787 | Sets opacity and blend mode properties for a video clip in the timeline.
788 |
789 | This function modifies the visual properties of a specific clip located on a specific video track
790 | in the active Premiere Pro sequence. The clip is identified by its track index and item index
791 | within that track.
792 |
793 | Args:
794 | sequence_id (str) : The id for the sequence to set the video clip properties
795 | video_track_index (int): The index of the video track containing the target clip.
796 | Track indices start at 0 for the first video track.
797 | track_item_index (int): The index of the clip within the track to modify.
798 | Clip indices start at 0 for the first clip on the track.
799 | opacity (int, optional): The opacity value to set for the clip, as a percentage.
800 | Valid values range from 0 (completely transparent) to 100 (completely opaque).
801 | Defaults to 100.
802 | blend_mode (str, optional): The blend mode to apply to the clip.
803 | Must be one of the valid blend modes supported by Premiere Pro.
804 | Defaults to "NORMAL".
805 | """
806 |
807 | command = createCommand("setVideoClipProperties", {
808 | "sequenceId": sequence_id,
809 | "videoTrackIndex":video_track_index,
810 | "trackItemIndex":track_item_index,
811 | "opacity":opacity,
812 | "blendMode":blend_mode
813 | })
814 |
815 | return sendCommand(command)
816 |
817 | @mcp.tool()
818 | def import_media(file_paths:list):
819 | """
820 | Imports a list of media files into the active Premiere project.
821 |
822 | Args:
823 | file_paths (list): A list of file paths (strings) to import into the project.
824 | Each path should be a complete, valid path to a media file supported by Premiere Pro.
825 | """
826 |
827 | command = createCommand("importMedia", {
828 | "filePaths":file_paths
829 | })
830 |
831 | return sendCommand(command)
832 |
833 | @mcp.resource("config://get_instructions")
834 | def get_instructions() -> str:
835 |     """Read this first! Returns information and instructions on how to use Premiere Pro and this API"""
836 |
837 | return f"""
838 | You are a Premiere Pro and video expert who is creative and loves to help other people learn to use Premiere and create.
839 |
840 | Rules to follow:
841 |
842 | 1. Think deeply about how to solve the task
843 | 2. Always check your work
844 | 3. Read the info for the API calls to make sure you understand the requirements and arguments
845 | 4. In general, add clips first, then effects, then transitions
846 | 5. As a general rule keep transitions short (no more than 2 seconds is a good rule), and there should not be a gap between clips (or else the transition may not work)
847 |
848 | IMPORTANT: To create a new project and add clips:
849 | 1. Create new project (create_project)
850 | 2. Add media to the project (import_media)
851 | 3. Create a new sequence with the media (create_sequence_from_media). You should always add video / image clips before audio. This will create a sequence containing the clips.
852 | 4. The first clip you add will determine the dimensions / resolution of the sequence
853 |
854 | Here are some general tips for when working with Premiere.
855 |
856 | Audio and Video clips are added on separate Audio / Video tracks, which you can access via their index.
857 |
858 | When adding a video clip that contains audio, the audio will be placed on a separate audio track.
859 |
860 | Once added, a clip (audio or video) can be removed with remove_item_from_sequence or disabled with set_clip_disabled.
861 |
862 | If you want to do a transition between two clips, the clips must be on the same track and there should not be a gap between them. Place the transition on the first clip.
863 |
864 | Video clips with a higher track index will overlap and hide those with lower index if they overlap.
865 |
866 | When adding images to a sequence, they will have a duration of 5 seconds.
867 |
868 | blend_modes: {", ".join(BLEND_MODES)}
869 | """
870 |
871 |
872 | BLEND_MODES = [
873 | "COLOR",
874 | "COLORBURN",
875 | "COLORDODGE",
876 | "DARKEN",
877 | "DARKERCOLOR",
878 | "DIFFERENCE",
879 | "DISSOLVE",
880 | "EXCLUSION",
881 | "HARDLIGHT",
882 | "HARDMIX",
883 | "HUE",
884 | "LIGHTEN",
885 | "LIGHTERCOLOR",
886 | "LINEARBURN",
887 | "LINEARDODGE",
888 | "LINEARLIGHT",
889 | "LUMINOSITY",
890 | "MULTIPLY",
891 | "NORMAL",
892 | "OVERLAY",
893 | "PINLIGHT",
894 | "SATURATION",
895 | "SCREEN",
896 | "SOFTLIGHT",
897 | "VIVIDLIGHT",
898 | "SUBTRACT",
899 | "DIVIDE"
900 | ]
```
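
The `get_instructions` resource above spells out the intended workflow: create a project, import media, build a sequence from it, then refine the timeline. A minimal sketch of that flow, calling the tool functions in this module directly (in normal use an MCP client invokes them over stdio); the file paths, item names, and the shape of the `get_project_info` response are illustrative assumptions, and the sketch assumes a running adb-proxy-socket and Premiere Pro with the plugin loaded:

```python
TICKS_PER_SECOND = 254_016_000_000  # Premiere's internal time unit: ticks per second

def seconds_to_ticks(seconds: float) -> int:
    """Converts seconds to the tick values expected by the timeline tools."""
    return int(seconds * TICKS_PER_SECOND)

# 1. Create and open a new project (hypothetical paths)
create_project("/Users/username/Documents", "demo")

# 2. Import media into the project
import_media([
    "/Users/username/Media/intro.mp4",
    "/Users/username/Media/main.mp4",
])

# 3. Create a sequence from the imported clips (video / image clips before audio)
create_sequence_from_media(["intro.mp4", "main.mp4"], sequence_name="rough cut")

# 4. Look up the sequence id, then refine the timeline
info = get_project_info()
sequence_id = info["response"]["sequences"][0]["id"]  # response shape is an assumption

add_marker_to_sequence(sequence_id, "Review here",
                       start_time_ticks=seconds_to_ticks(5),
                       duration_ticks=seconds_to_ticks(1),
                       comments="Check pacing")

append_video_transition(sequence_id, video_track_index=0, track_item_index=0,
                        transition_name="ADBE Film Dissolve", duration=1.0)
```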