diff --git a/app/package.json b/app/package.json
index c225e08962..3ea9bdcc77 100644
--- a/app/package.json
+++ b/app/package.json
@@ -6,25 +6,34 @@
   "license": "None",
   "private": true,
   "dependencies": {
-    "@arizeai/components": "^1.8.1",
+    "@arizeai/components": "^1.8.7",
     "@arizeai/openinference-semantic-conventions": "^0.10.0",
     "@arizeai/point-cloud": "^3.0.6",
     "@codemirror/autocomplete": "6.12.0",
     "@codemirror/lang-javascript": "^6.2.2",
     "@codemirror/lang-json": "6.0.1",
     "@codemirror/lang-python": "6.1.3",
+    "@codemirror/language": "^6.10.3",
     "@codemirror/lint": "^6.8.1",
     "@codemirror/view": "^6.28.5",
+    "@dnd-kit/core": "^6.1.0",
+    "@dnd-kit/sortable": "^8.0.0",
+    "@dnd-kit/utilities": "^3.2.2",
+    "@lezer/highlight": "^1.2.1",
+    "@lezer/lr": "^1.4.2",
     "@react-three/drei": "^9.108.4",
     "@react-three/fiber": "8.0.12",
     "@tanstack/react-table": "^8.19.3",
+    "@uiw/codemirror-theme-github": "^4.23.5",
     "@uiw/codemirror-theme-nord": "^4.23.0",
     "@uiw/react-codemirror": "^4.23.0",
+    "codemirror-json-schema": "^0.7.8",
     "copy-to-clipboard": "^3.3.3",
     "d3-format": "^3.1.0",
     "d3-scale-chromatic": "^3.1.0",
     "d3-time-format": "^4.1.0",
     "date-fns": "^3.6.0",
+    "graphql-ws": "^5.16.0",
     "lodash": "^4.17.21",
     "normalize.css": "^8.0.1",
     "polished": "^4.3.1",
@@ -46,14 +55,18 @@
     "three-stdlib": "^2.30.4",
     "use-deep-compare-effect": "^1.8.1",
     "use-zustand": "^0.0.4",
+    "zod": "^3.23.8",
+    "zod-to-json-schema": "^3.23.3",
     "zustand": "^4.5.4"
   },
   "devDependencies": {
     "@emotion/react": "^11.11.4",
+    "@lezer/generator": "^1.7.1",
     "@playwright/test": "^1.48.0",
     "@types/d3-format": "^3.0.4",
     "@types/d3-scale-chromatic": "^3.0.3",
     "@types/d3-time-format": "^4.0.3",
+    "@types/json-schema": "^7.0.15",
     "@types/lodash": "^4.17.7",
     "@types/node": "^22.5.4",
     "@types/react": "18.3.10",
@@ -65,6 +78,7 @@
     "@typescript-eslint/eslint-plugin": "^7.16.1",
     "@typescript-eslint/parser": "^7.16.1",
     "@vitejs/plugin-react": "^4.3.1",
+    "babel-plugin-relay": "^18.1.0",
     "cpy-cli": "^5.0.0",
     "eslint": "^8.57.0",
     "eslint-plugin-react": "^7.34.4",
@@ -80,7 +94,8 @@
     "typescript": "~5.4.5",
     "vite": "^5.3.6",
     "vite-plugin-relay": "^2.1.0",
-    "vitest": "^2.1.2"
+    "vitest": "^2.1.2",
+    "vitest-canvas-mock": "^0.3.3"
   },
   "scripts": {
     "preinstall": "npx only-allow pnpm",
@@ -88,6 +103,7 @@
     "build:static": "cpy ./static ../src/phoenix/server",
     "build:relay": "relay-compiler",
     "test": "vitest run",
+    "test:watch": "vitest",
     "test:e2e": "playwright test",
     "test:e2e:ui": "playwright test --ui",
     "dev": "pnpm run dev:server & pnpm run build:static && pnpm run build:relay && vite",
diff --git a/app/playwright.config.ts b/app/playwright.config.ts
index 11bdacec29..17d2b893ac 100644
--- a/app/playwright.config.ts
+++ b/app/playwright.config.ts
@@ -57,6 +57,8 @@ export default defineConfig({
   retries: process.env.CI ? 2 : 0,
   /* Reporter to use. See https://playwright.dev/docs/test-reporters */
   reporter: "html",
+  /* Opt out of parallel tests on CI. */
+  // workers: process.env.CI ? 1 : undefined,
   /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
   use: {
     /* Base URL to use in actions like `await page.goto('/')`.
*/ diff --git a/app/pnpm-lock.yaml b/app/pnpm-lock.yaml index 2e49b9a232..2f465e6e3f 100644 --- a/app/pnpm-lock.yaml +++ b/app/pnpm-lock.yaml @@ -12,8 +12,8 @@ importers: .: dependencies: '@arizeai/components': - specifier: ^1.8.1 - version: 1.8.1(@types/react@18.3.10)(eslint@8.57.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: ^1.8.7 + version: 1.8.7(@types/react@18.3.10)(eslint@8.57.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@arizeai/openinference-semantic-conventions': specifier: ^0.10.0 version: 0.10.0 @@ -22,7 +22,7 @@ importers: version: 3.0.6(@react-three/drei@9.108.4(@react-three/fiber@8.0.12(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(three@0.139.2))(@types/react@18.3.10)(@types/three@0.149.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(three@0.139.2))(@react-three/fiber@8.0.12(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(three@0.139.2))(react@18.3.1)(three-stdlib@2.30.4(three@0.139.2))(three@0.139.2) '@codemirror/autocomplete': specifier: 6.12.0 - version: 6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1) + version: 6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) '@codemirror/lang-javascript': specifier: ^6.2.2 version: 6.2.2 @@ -31,13 +31,31 @@ importers: version: 6.0.1 '@codemirror/lang-python': specifier: 6.1.3 - version: 6.1.3(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1) + version: 6.1.3(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) + '@codemirror/language': + specifier: ^6.10.3 + version: 6.10.3 '@codemirror/lint': specifier: ^6.8.1 version: 6.8.1 '@codemirror/view': specifier: ^6.28.5 - version: 6.28.5 + version: 6.29.0 + '@dnd-kit/core': + specifier: ^6.1.0 + version: 6.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@dnd-kit/sortable': + specifier: ^8.0.0 + version: 8.0.0(@dnd-kit/core@6.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1) + '@dnd-kit/utilities': + specifier: ^3.2.2 + version: 3.2.2(react@18.3.1) + '@lezer/highlight': + specifier: ^1.2.1 + version: 1.2.1 + '@lezer/lr': + specifier: ^1.4.2 + version: 1.4.2 '@react-three/drei': specifier: ^9.108.4 version: 9.108.4(@react-three/fiber@8.0.12(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(three@0.139.2))(@types/react@18.3.10)(@types/three@0.149.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(three@0.139.2) @@ -47,12 +65,18 @@ importers: '@tanstack/react-table': specifier: ^8.19.3 version: 8.19.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@uiw/codemirror-theme-github': + specifier: ^4.23.5 + version: 4.23.5(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0) '@uiw/codemirror-theme-nord': specifier: ^4.23.0 - version: 4.23.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5) + version: 4.23.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0) '@uiw/react-codemirror': specifier: ^4.23.0 - version: 4.23.0(@babel/runtime@7.24.8)(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1))(@codemirror/language@6.10.2)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/theme-one-dark@6.1.2)(@codemirror/view@6.28.5)(codemirror@6.0.1(@lezer/common@1.2.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 
4.23.0(@babel/runtime@7.24.8)(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1))(@codemirror/language@6.10.3)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/theme-one-dark@6.1.2)(@codemirror/view@6.29.0)(codemirror@6.0.1(@lezer/common@1.2.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + codemirror-json-schema: + specifier: ^0.7.8 + version: 0.7.8(@codemirror/language@6.10.3)(@codemirror/lint@6.8.1)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1)(typescript@5.4.5)(vite@5.3.6(@types/node@22.5.4)) copy-to-clipboard: specifier: ^3.3.3 version: 3.3.3 @@ -68,6 +92,9 @@ importers: date-fns: specifier: ^3.6.0 version: 3.6.0 + graphql-ws: + specifier: ^5.16.0 + version: 5.16.0(graphql@16.9.0) lodash: specifier: ^4.17.21 version: 4.17.21 @@ -131,6 +158,12 @@ importers: use-zustand: specifier: ^0.0.4 version: 0.0.4(react@18.3.1) + zod: + specifier: ^3.23.8 + version: 3.23.8 + zod-to-json-schema: + specifier: ^3.23.3 + version: 3.23.3(zod@3.23.8) zustand: specifier: ^4.5.4 version: 4.5.4(@types/react@18.3.10)(react@18.3.1) @@ -138,6 +171,9 @@ importers: '@emotion/react': specifier: ^11.11.4 version: 11.11.4(@types/react@18.3.10)(react@18.3.1) + '@lezer/generator': + specifier: ^1.7.1 + version: 1.7.1 '@playwright/test': specifier: ^1.48.0 version: 1.48.0 @@ -150,6 +186,9 @@ importers: '@types/d3-time-format': specifier: ^4.0.3 version: 4.0.3 + '@types/json-schema': + specifier: ^7.0.15 + version: 7.0.15 '@types/lodash': specifier: ^4.17.7 version: 4.17.7 @@ -183,6 +222,9 @@ importers: '@vitejs/plugin-react': specifier: ^4.3.1 version: 4.3.1(vite@5.3.6(@types/node@22.5.4)) + babel-plugin-relay: + specifier: ^18.1.0 + version: 18.1.0 cpy-cli: specifier: ^5.0.0 version: 5.0.0 @@ -227,10 +269,13 @@ importers: version: 5.3.6(@types/node@22.5.4) vite-plugin-relay: specifier: ^2.1.0 - version: 2.1.0(babel-plugin-relay@17.0.0)(vite@5.3.6(@types/node@22.5.4)) + version: 2.1.0(babel-plugin-relay@18.1.0)(vite@5.3.6(@types/node@22.5.4)) vitest: specifier: ^2.1.2 version: 2.1.2(@types/node@22.5.4)(jsdom@25.0.1) + vitest-canvas-mock: + specifier: ^0.3.3 + version: 0.3.3(vitest@2.1.2(@types/node@22.5.4)(jsdom@25.0.1)) packages: @@ -238,8 +283,8 @@ packages: resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} engines: {node: '>=6.0.0'} - '@arizeai/components@1.8.1': - resolution: {integrity: sha512-djcZ9noXJRaHsYHYJXAGKeON+vacT3aJvKdFcF0hJeWZ/j7wMGCpvzYwxRgLUooZvj5ifTGBzG9DmF6/sC+v5g==} + '@arizeai/components@1.8.7': + resolution: {integrity: sha512-Sw4vWR+Cr5VDJ73kqvfQa7ar1GQNQ8PUTfnlF7xxl/kRXaGtEtTqQg7/WQLVgkNna9cMXgUhyXkpiZKe7JkX2A==} engines: {node: '>=14'} peerDependencies: react: '>=18' @@ -270,10 +315,6 @@ packages: resolution: {integrity: sha512-5e3FI4Q3M3Pbr21+5xJwCv6ZT6KmGkI0vw3Tozy5ODAQFTIWe37iT8Cr7Ice2Ntb+M3iSKCEWMB1MBgKrW3whg==} engines: {node: '>=6.9.0'} - '@babel/generator@7.24.10': - resolution: {integrity: sha512-o9HBZL1G2129luEUlG1hB4N/nlYNWHnpwlND9eOMclRqqu1YDy2sSYVCFUZwl8I1Gxh+QSRrP2vD7EpUmFVXxg==} - engines: {node: '>=6.9.0'} - '@babel/generator@7.25.6': resolution: {integrity: sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==} engines: {node: '>=6.9.0'} @@ -296,14 +337,6 @@ packages: resolution: {integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==} engines: {node: '>=6.9.0'} - 
'@babel/helper-function-name@7.24.7': - resolution: {integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==} - engines: {node: '>=6.9.0'} - - '@babel/helper-hoist-variables@7.24.7': - resolution: {integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==} - engines: {node: '>=6.9.0'} - '@babel/helper-member-expression-to-functions@7.24.8': resolution: {integrity: sha512-LABppdt+Lp/RlBxqrh4qgf1oEH/WxdzQNDJIu5gC/W1GyvPVrOBiItmmM8wan2fm4oYqFuFfkXmlGpLQhPY8CA==} engines: {node: '>=6.9.0'} @@ -364,11 +397,6 @@ packages: resolution: {integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==} engines: {node: '>=6.9.0'} - '@babel/parser@7.24.8': - resolution: {integrity: sha512-WzfbgXOkGzZiXXCqk43kKwZjzwx4oulxZi3nq2TYL9mOjQv6kYwul9mz6ID36njuL7Xkp6nJEfok848Zj10j/w==} - engines: {node: '>=6.0.0'} - hasBin: true - '@babel/parser@7.25.6': resolution: {integrity: sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==} engines: {node: '>=6.0.0'} @@ -397,30 +425,27 @@ packages: resolution: {integrity: sha512-5F7SDGs1T72ZczbRwbGO9lQi0NLjQxzl6i4lJxLxfW9U5UluCSyEJeniWvnhl3/euNiqQVbo8zruhsDfid0esA==} engines: {node: '>=6.9.0'} - '@babel/template@7.24.7': - resolution: {integrity: sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==} - engines: {node: '>=6.9.0'} - '@babel/template@7.25.0': resolution: {integrity: sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==} engines: {node: '>=6.9.0'} - '@babel/traverse@7.24.8': - resolution: {integrity: sha512-t0P1xxAPzEDcEPmjprAQq19NWum4K0EQPjMwZQZbHt+GiZqvjCHjj755Weq1YRPVzBI+3zSfvScfpnuIecVFJQ==} - engines: {node: '>=6.9.0'} - '@babel/traverse@7.25.6': resolution: {integrity: sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==} engines: {node: '>=6.9.0'} - '@babel/types@7.24.9': - resolution: {integrity: sha512-xm8XrMKz0IlUdocVbYJe0Z9xEgidU7msskG8BbhnTPK/HZ2z/7FP7ykqPgrUH+C+r414mNfNWam1f2vqOjqjYQ==} - engines: {node: '>=6.9.0'} - '@babel/types@7.25.6': resolution: {integrity: sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==} engines: {node: '>=6.9.0'} + '@changesets/changelog-github@0.4.8': + resolution: {integrity: sha512-jR1DHibkMAb5v/8ym77E4AMNWZKB5NPzw5a5Wtqm1JepAuIF+hrKp2u04NKM14oBZhHglkCfrla9uq8ORnK/dw==} + + '@changesets/get-github-info@0.5.2': + resolution: {integrity: sha512-JppheLu7S114aEs157fOZDjFqUDpm7eHdq5E8SSR0gUBTEK0cNSHsrSR5a66xs0z3RWuo46QvA3vawp8BxDHvg==} + + '@changesets/types@5.2.1': + resolution: {integrity: sha512-myLfHbVOqaq9UtUKqR/nZA/OY7xFjQMdfgfqeZIBK4d0hA6pgxArvdv8M+6NUzzBsjWLOtvApv8YHr4qM+Kpfg==} + '@codemirror/autocomplete@6.12.0': resolution: {integrity: sha512-r4IjdYFthwbCQyvqnSlx0WBHRHi8nBvU+WjJxFUij81qsBfhNudf/XKKmmC2j3m0LaOYUQTf3qiEK1J8lO1sdg==} peerDependencies: @@ -429,12 +454,17 @@ packages: '@codemirror/view': ^6.0.0 '@lezer/common': ^1.0.0 + '@codemirror/autocomplete@6.18.1': + resolution: {integrity: sha512-iWHdj/B1ethnHRTwZj+C1obmmuCzquH29EbcKr0qIjA9NfDeBDJ7vs+WOHsFeLeflE4o+dHfYndJloMKHUkWUA==} + peerDependencies: + '@codemirror/language': ^6.0.0 + '@codemirror/state': ^6.0.0 + '@codemirror/view': ^6.0.0 + '@lezer/common': ^1.0.0 + '@codemirror/commands@6.6.0': resolution: {integrity: 
sha512-qnY+b7j1UNcTS31Eenuc/5YJB6gQOzkUoNmJQc0rznwqSRpeaWWpjkWy2C/MPTcePpsKJEM26hXrOXl1+nceXg==} - '@codemirror/commands@6.7.0': - resolution: {integrity: sha512-+cduIZ2KbesDhbykV02K25A5xIVrquSPz4UxxYBemRlAT2aW8dhwUgLDwej7q/RJUHKk4nALYcR1puecDvbdqw==} - '@codemirror/lang-javascript@6.2.2': resolution: {integrity: sha512-VGQfY+FCc285AhWuwjYxQyUQcYurWlxdKYT4bqwr3Twnd5wP5WSeu52t4tvvuWmljT4EmgEgZCqSieokhtY8hg==} @@ -444,15 +474,15 @@ packages: '@codemirror/lang-python@6.1.3': resolution: {integrity: sha512-S9w2Jl74hFlD5nqtUMIaXAq9t5WlM0acCkyuQWUUSvZclk1sV+UfnpFiZzuZSG+hfEaOmxKR5UxY/Uxswn7EhQ==} - '@codemirror/language@6.10.2': - resolution: {integrity: sha512-kgbTYTo0Au6dCSc/TFy7fK3fpJmgHDv1sG1KNQKJXVi+xBTEeBPY/M30YXiU6mMXeH+YIDLsbrT4ZwNRdtF+SA==} + '@codemirror/lang-yaml@6.1.1': + resolution: {integrity: sha512-HV2NzbK9bbVnjWxwObuZh5FuPCowx51mEfoFT9y3y+M37fA3+pbxx4I7uePuygFzDsAmCTwQSc/kXh/flab4uw==} + + '@codemirror/language@6.10.3': + resolution: {integrity: sha512-kDqEU5sCP55Oabl6E7m5N+vZRoc0iWqgDVhEKifcHzPzjqCegcO4amfrYVL9PmPZpl4G0yjkpTpUO/Ui8CzO8A==} '@codemirror/lint@6.8.1': resolution: {integrity: sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==} - '@codemirror/lint@6.8.2': - resolution: {integrity: sha512-PDFG5DjHxSEjOXk9TQYYVjZDqlZTFaDBfhQixHnQOEVDDNHUbEh/hstAjcQJaA6FQdZTD1hquXTK0rVBLADR1g==} - '@codemirror/search@6.5.6': resolution: {integrity: sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q==} @@ -462,14 +492,30 @@ packages: '@codemirror/theme-one-dark@6.1.2': resolution: {integrity: sha512-F+sH0X16j/qFLMAfbciKTxVOwkdAS336b7AXTKOZhy8BR3eH/RelsnLgLFINrpST63mmN2OuwUt0W2ndUgYwUA==} - '@codemirror/view@6.28.5': - resolution: {integrity: sha512-NkUtfUa1lV7Jqg5DfHE/uLl7jKyoymDkaueMQXzePYuezL7FwX3ATANy74iAGlHCGe25hBGB0R+I5dC5EZ5JBg==} - '@codemirror/view@6.29.0': resolution: {integrity: sha512-ED4ims4fkf7eOA+HYLVP8VVg3NMllt1FPm9PEJBfYFnidKlRITBaua38u68L1F60eNtw2YNcDN5jsIzhKZwWQA==} - '@codemirror/view@6.34.1': - resolution: {integrity: sha512-t1zK/l9UiRqwUNPm+pdIT0qzJlzuVckbTEMVNFhfWkGiBQClstzg+78vedCvLSX0xJEZ6lwZbPpnljL7L6iwMQ==} + '@dnd-kit/accessibility@3.1.0': + resolution: {integrity: sha512-ea7IkhKvlJUv9iSHJOnxinBcoOI3ppGnnL+VDJ75O45Nss6HtZd8IdN8touXPDtASfeI2T2LImb8VOZcL47wjQ==} + peerDependencies: + react: '>=16.8.0' + + '@dnd-kit/core@6.1.0': + resolution: {integrity: sha512-J3cQBClB4TVxwGo3KEjssGEXNJqGVWx17aRTZ1ob0FliR5IjYgTxl5YJbKTzA6IzrtelotH19v6y7uoIRUZPSg==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@dnd-kit/sortable@8.0.0': + resolution: {integrity: sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==} + peerDependencies: + '@dnd-kit/core': ^6.1.0 + react: '>=16.8.0' + + '@dnd-kit/utilities@3.2.2': + resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==} + peerDependencies: + react: '>=16.8.0' '@emotion/babel-plugin@11.11.0': resolution: {integrity: sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==} @@ -729,8 +775,9 @@ packages: '@lezer/common@1.2.1': resolution: {integrity: sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==} - '@lezer/highlight@1.2.0': - resolution: {integrity: sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==} + '@lezer/generator@1.7.1': + resolution: {integrity: 
sha512-MgPJN9Si+ccxzXl3OAmCeZuUKw4XiPl4y664FX/hnnyG9CTqUPq65N3/VGPA2jD23D7QgMTtNqflta+cPN+5mQ==} + hasBin: true '@lezer/highlight@1.2.1': resolution: {integrity: sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==} @@ -741,12 +788,15 @@ packages: '@lezer/json@1.0.2': resolution: {integrity: sha512-xHT2P4S5eeCYECyKNPhr4cbEL9tc8w83SPwRC373o9uEdrvGKTZoJVAGxpOsZckMlEh9W23Pc72ew918RWQOBQ==} - '@lezer/lr@1.4.1': - resolution: {integrity: sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==} + '@lezer/lr@1.4.2': + resolution: {integrity: sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==} '@lezer/python@1.1.14': resolution: {integrity: sha512-ykDOb2Ti24n76PJsSa4ZoDF0zH12BSw1LGfQXCYJhJyOGiFTfGaX0Du66Ze72R+u/P35U+O6I9m8TFXov1JzsA==} + '@lezer/yaml@1.0.3': + resolution: {integrity: sha512-GuBLekbw9jDBDhGur82nuwkxKQ+a3W5H0GfaAthDXcAu+XdpS43VlnxA9E9hllkpSP5ellRDKjLLj7Lu9Wr6xA==} + '@mediapipe/tasks-vision@0.10.8': resolution: {integrity: sha512-Rp7ll8BHrKB3wXaRFKhrltwZl1CiXGdibPxuWXvqGnKTnv8fqa/nvftYNuSbf+pbJWKYCXdBtYTITdAUTGGh0Q==} @@ -1233,6 +1283,30 @@ packages: cpu: [x64] os: [win32] + '@sagold/json-pointer@5.1.2': + resolution: {integrity: sha512-+wAhJZBXa6MNxRScg6tkqEbChEHMgVZAhTHVJ60Y7sbtXtu9XA49KfUkdWlS2x78D6H9nryiKePiYozumauPfA==} + + '@sagold/json-query@6.2.0': + resolution: {integrity: sha512-7bOIdUE6eHeoWtFm8TvHQHfTVSZuCs+3RpOKmZCDBIOrxpvF/rNFTeuvIyjHva/RR0yVS3kQtr+9TW72LQEZjA==} + + '@shikijs/core@1.22.0': + resolution: {integrity: sha512-S8sMe4q71TJAW+qG93s5VaiihujRK6rqDFqBnxqvga/3LvqHEnxqBIOPkt//IdXVtHkQWKu4nOQNk0uBGicU7Q==} + + '@shikijs/engine-javascript@1.22.0': + resolution: {integrity: sha512-AeEtF4Gcck2dwBqCFUKYfsCq0s+eEbCEbkUuFou53NZ0sTGnJnJ/05KHQFZxpii5HMXbocV9URYVowOP2wH5kw==} + + '@shikijs/engine-oniguruma@1.22.0': + resolution: {integrity: sha512-5iBVjhu/DYs1HB0BKsRRFipRrD7rqjxlWTj4F2Pf+nQSPqc3kcyqFFeZXnBMzDf0HdqaFVvhDRAGiYNvyLP+Mw==} + + '@shikijs/markdown-it@1.22.0': + resolution: {integrity: sha512-IAWi2pbzYndiuXOWnV5Ll4ULQJeWl45WUACl1Xc2KSiBl0JtQmqKvPOGKN7YSZbyIzkB6bWUItRrv5ucO35U+g==} + + '@shikijs/types@1.22.0': + resolution: {integrity: sha512-Fw/Nr7FGFhlQqHfxzZY8Cwtwk5E9nKDUgeLjZgt3UuhcM3yJR9xj3ZGNravZZok8XmEZMiYkSMTPlPkULB8nww==} + + '@shikijs/vscode-textmate@9.3.0': + resolution: {integrity: sha512-jn7/7ky30idSkd/O5yDBfAnVt+JJpepofP/POZ1iMOxK59cOfqIgg/Dj0eFsjOTMw+4ycJN0uhZH/Eb0bs/EUA==} + '@swc/helpers@0.5.12': resolution: {integrity: sha512-KMZNXiGibsW9kvZAO1Pam2JPTDBm+KSHMMHWdsyI/1DbIZjT2A6Gy3hblVXUMEDvUAKq+e0vL0X0o54owWji7g==} @@ -1313,15 +1387,15 @@ packages: '@types/estree-jsx@1.0.5': resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} - '@types/estree@1.0.5': - resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} - '@types/estree@1.0.6': resolution: {integrity: sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==} '@types/hast@3.0.4': resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + '@types/lodash@4.17.7': resolution: {integrity: sha512-8wTvZawATi/lsmNu10/j2hk1KEP0IvjubqPE3cu1Xz7xfXXt5oCq3SNUz4fMIP4XGF9Ky+Ue2tBA3hcS7LSBlA==} @@ 
-1331,6 +1405,9 @@ packages: '@types/ms@0.7.34': resolution: {integrity: sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==} + '@types/node@20.16.11': + resolution: {integrity: sha512-y+cTCACu92FyA5fgQSAI8A1H429g7aSK2HsO7K4XYUWc4dY5IUz55JSDIYT6/VsOLfGy8vmvQYC2hfb0iF16Uw==} + '@types/node@22.5.4': resolution: {integrity: sha512-FDuKUJQm/ju9fT/SeX/6+gBzoPzlVCzfzmGkwKvRHQVxi4BntVbyIwf6a4Xn62mrvndLiml6z/UBXIdEVjQLXg==} @@ -1340,8 +1417,8 @@ packages: '@types/parse-json@4.0.2': resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} - '@types/prop-types@15.7.12': - resolution: {integrity: sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==} + '@types/prop-types@15.7.13': + resolution: {integrity: sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==} '@types/react-dom@18.3.0': resolution: {integrity: sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==} @@ -1448,6 +1525,9 @@ packages: '@codemirror/state': '>=6.0.0' '@codemirror/view': '>=6.0.0' + '@uiw/codemirror-theme-github@4.23.5': + resolution: {integrity: sha512-gR5rgWUaRoLRavzA6w+/dKE6KMDQdHF82xpnLYQvOwE/1agNS0asowdZUodMXbvOoNLIgcopLm3hXdzzVouuaw==} + '@uiw/codemirror-theme-nord@4.23.0': resolution: {integrity: sha512-oBXSEhp313ZnWWOF/WC8cOLmAmBkLHwfFS8CBsMHWlgwPBYxxzoJUc0AuFdH16zhnc5MoGhrFccZ8RT8tFv15w==} @@ -1458,6 +1538,13 @@ packages: '@codemirror/state': '>=6.0.0' '@codemirror/view': '>=6.0.0' + '@uiw/codemirror-themes@4.23.5': + resolution: {integrity: sha512-yWUTpaVroxIxjKASQAmKaYy+ZYtF+YB6d8sVmSRK2TVD13M+EWvVT2jBGFLqR1UVg7G0W/McAy8xdeTg+a3slg==} + peerDependencies: + '@codemirror/language': '>=6.0.0' + '@codemirror/state': '>=6.0.0' + '@codemirror/view': '>=6.0.0' + '@uiw/react-codemirror@4.23.0': resolution: {integrity: sha512-MnqTXfgeLA3fsUUQjqjJgemEuNyoGALgsExVm0NQAllAAi1wfj+IoKFeK+h3XXMlTFRCFYOUh4AHDv0YXJLsOg==} peerDependencies: @@ -1615,8 +1702,8 @@ packages: resolution: {integrity: sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==} engines: {node: '>=10', npm: '>=6'} - babel-plugin-relay@17.0.0: - resolution: {integrity: sha512-aBIEDp6jFOAO+CD+GGKv4/WOQ7cnS6hZysJI0dFbLRQXZ6XQar0lytxQ4zuqPU0RpGnUeN5htK3NTewu2TZ53A==} + babel-plugin-relay@18.1.0: + resolution: {integrity: sha512-k3ehQO5CEOgKLJmHAswkV+qatPdfH2fAkibo/ejUsSyadE+iA+Uzb2uBcVdgvLRSH+n1euuqtV2BBUEKeJRmrg==} bail@2.0.2: resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==} @@ -1721,6 +1808,18 @@ packages: resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} engines: {node: '>=6'} + codemirror-json-schema@0.7.8: + resolution: {integrity: sha512-tfHRirCWbkGGdTY/Y9t/fNu4OHCOWULoATycQmV3lgGzS+OKPB41clji9nNDtF1vnSYH2pD/BVzzD7B0sXgEdQ==} + peerDependencies: + '@codemirror/language': ^6.10.2 + '@codemirror/lint': ^6.8.0 + '@codemirror/state': ^6.4.1 + '@codemirror/view': ^6.27.0 + '@lezer/common': ^1.2.1 + + codemirror-json5@1.0.3: + resolution: {integrity: sha512-HmmoYO2huQxoaoG5ARKjqQc9mz7/qmNPvMbISVfIE2Gk1+4vZQg9X3G6g49MYM5IK00Ol3aijd7OKrySuOkA7Q==} + codemirror@6.0.1: resolution: {integrity: sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==} @@ -1744,6 +1843,9 @@ packages: comma-separated-tokens@2.0.3: 
resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} + commander@2.20.3: + resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} + concat-map@0.0.1: resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} @@ -1796,6 +1898,9 @@ packages: resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} engines: {node: '>= 8'} + cssfontparser@1.2.1: + resolution: {integrity: sha512-6tun4LoZnj7VN6YeegOVb67KBX/7JJsqvj+pv3ZA7F878/eN33AbGa5b/S/wXxS/tcp8nc40xRUrsPlxIyNUPg==} + cssstyle@4.1.0: resolution: {integrity: sha512-h66W1URKpBS5YMI/V8PyXvTMFT8SupJ1IzoIV8IeBC/ji8WVmrO8dGlTi+2dh6whmdk6BiKJLD/ZBkhWbcg6nA==} engines: {node: '>=18'} @@ -1867,21 +1972,15 @@ packages: resolution: {integrity: sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==} engines: {node: '>= 0.4'} + dataloader@1.4.0: + resolution: {integrity: sha512-68s5jYdlvasItOJnCuI2Q9s4q98g0pCyL3HrcKJu8KNugUl8ahgmZYg38ysLTgQjjXX3H8CJLkAvWrclWfcalw==} + date-fns@3.6.0: resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==} debounce@1.2.1: resolution: {integrity: sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==} - debug@4.3.5: - resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==} - engines: {node: '>=6.0'} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true - debug@4.3.7: resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} engines: {node: '>=6.0'} @@ -1907,6 +2006,10 @@ packages: deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + define-data-property@1.1.4: resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} engines: {node: '>= 0.4'} @@ -1937,6 +2040,9 @@ packages: resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} engines: {node: '>=8'} + discontinuous-range@1.0.0: + resolution: {integrity: sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ==} + doctrine@2.1.0: resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==} engines: {node: '>=0.10.0'} @@ -1948,9 +2054,17 @@ packages: dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} + dotenv@8.6.0: + resolution: {integrity: sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g==} + engines: {node: '>=10'} + draco3d@1.5.7: resolution: {integrity: sha512-m6WCKt/erDXcw+70IJXnG7M3awwQPAsZvJGX5zY7beBqpELw6RDGkYVU0W43AFxye4pDZ5i2Lbyc/NNGqwjUVQ==} + ebnf@1.9.1: + resolution: {integrity: sha512-uW2UKSsuty9ANJ3YByIQE4ANkD8nqUPO7r6Fwcc1ADKPe9FRdcPpMl3VEput4JSvKBJ4J86npIC2MLP0pYkCuw==} + hasBin: 
true + electron-to-chromium@1.4.829: resolution: {integrity: sha512-5qp1N2POAfW0u1qGAxXEtz6P7bO1m6gpZr5hdf5ve6lxpLM7MpiM4jIPz7xcrNlClQMafbyUDDWjlIQZ1Mw0Rw==} @@ -2056,6 +2170,7 @@ packages: eslint@8.57.0: resolution: {integrity: sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. hasBin: true espree@9.6.1: @@ -2095,6 +2210,9 @@ packages: extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + fast-copy@3.0.2: + resolution: {integrity: sha512-dl0O9Vhju8IrcLndv2eU4ldt1ftXMqqfgN4H1cpmGV7P6jeB9FwpN9a2c8DPGE1Ys88rNUJVYDHq73CGAGOPfQ==} + fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} @@ -2152,8 +2270,8 @@ packages: for-each@0.3.3: resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==} - form-data@4.0.1: - resolution: {integrity: sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==} + form-data@4.0.0: + resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} engines: {node: '>= 6'} fs.realpath@1.0.0: @@ -2227,6 +2345,9 @@ packages: resolution: {integrity: sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + globrex@0.1.2: + resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} + glsl-noise@0.0.0: resolution: {integrity: sha512-b/ZCF6amfAUb7dJM/MxRs7AetQEahYzJ8PtgfrmEdtw6uyGOr+ZSGtgjFm6mfsBkxJ4d2W7kg+Nlqzqvn3Bc0w==} @@ -2239,6 +2360,12 @@ packages: graphemer@1.4.0: resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + graphql-ws@5.16.0: + resolution: {integrity: sha512-Ju2RCU2dQMgSKtArPbEtsK5gNLnsQyTNIo/T7cZNp96niC1x0KdJNZV0TIoilceBPQwfb5itrGl8pkFeOUMl4A==} + engines: {node: '>=10'} + peerDependencies: + graphql: '>=0.11 <=16' + graphql@15.3.0: resolution: {integrity: sha512-GTCJtzJmkFLWRfFJuoo9RWWa/FfamUHgiFosxi/X1Ani4AVWbeyBenZTNX6dM+7WSbbFfTo/25eh0LLkwHMw2w==} engines: {node: '>= 10.x'} @@ -2277,6 +2404,9 @@ packages: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} + hast-util-to-html@9.0.3: + resolution: {integrity: sha512-M17uBDzMJ9RPCqLMO92gNNUDuBSq10a25SDBI08iCCxmorf4Yy6sYHK57n9WAbRAAaU+DuR4W6GN9K4DFZesYg==} + hast-util-to-jsx-runtime@2.3.0: resolution: {integrity: sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==} @@ -2302,6 +2432,9 @@ packages: html-url-attributes@3.0.0: resolution: {integrity: sha512-/sXbVCWayk6GDVg3ctOX6nxaVj7So40FcFAnWlWGNAB1LpYKcV5Cd10APjPjW80O7zYW2MsjBV4zZ7IZO5fVow==} + html-void-elements@3.0.0: + resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + http-proxy-agent@7.0.2: resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} engines: {node: '>= 14'} @@ -2513,6 +2646,9 @@ packages: iterator.prototype@1.1.2: 
resolution: {integrity: sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==} + jest-canvas-mock@2.5.2: + resolution: {integrity: sha512-vgnpPupjOL6+L5oJXzxTxFrlGEIbHdZqFU+LFNdtLxZ3lRDCl17FlTMM7IatoRQkrcyOTMlDinjUguqmQ6bR2A==} + js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -2547,9 +2683,15 @@ packages: json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + json-schema-library@9.3.5: + resolution: {integrity: sha512-5eBDx7cbfs+RjylsVO+N36b0GOPtv78rfqgf2uON+uaHUIC62h63Y8pkV2ovKbaL4ZpQcHp21968x5nx/dFwqQ==} + json-schema-traverse@0.4.1: resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + json-schema@0.4.0: + resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} + json-stable-stringify-without-jsonify@1.0.1: resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} @@ -2573,12 +2715,18 @@ packages: resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} engines: {node: '>= 0.8.0'} + lezer-json5@2.0.2: + resolution: {integrity: sha512-NRmtBlKW/f8mA7xatKq8IUOq045t8GVHI4kZXrUtYYUdiVeGiO6zKGAV7/nUAnf5q+rYTY+SWX/gvQdFXMjNxQ==} + lie@3.3.0: resolution: {integrity: sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==} lines-and-columns@1.2.4: resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + linkify-it@5.0.0: + resolution: {integrity: sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==} + locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} @@ -2611,6 +2759,10 @@ packages: magic-string@0.30.12: resolution: {integrity: sha512-Ea8I3sQMVXr8JhN4z+H/d8zwo+tYDgHE9+5G4Wnrwhs0gaK9fXTKx0Tw5Xwsd/bCPTTZNRAdpyzvoeORe9LYpw==} + markdown-it@14.1.0: + resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} + hasBin: true + markdown-table@3.0.3: resolution: {integrity: sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==} @@ -2659,6 +2811,9 @@ packages: mdast-util-to-string@4.0.0: resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==} + mdurl@2.0.0: + resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} + meow@12.1.1: resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==} engines: {node: '>=16.10'} @@ -2782,8 +2937,11 @@ packages: resolution: {integrity: sha512-8OCq0De/h9ZxseqzCH8Kw/Filf5pF/vMI6+BH7Lu0jXz2pqYCjTAQRolSxRIi+Ax+oCCjlxoJMP0YQ4XlrQNHg==} deprecated: Legacy versions of mkdirp are no longer supported. Please update to mkdirp 1.x. (Note that the API surface has changed to use Promises in 1.x.) 
- ms@2.1.2: - resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + moo-color@1.0.3: + resolution: {integrity: sha512-i/+ZKXMDf6aqYtBhuOcej71YSlbjT3wCO/4H1j8rPvxDJEifdwgg5MaFyu6iYAT8GBZJg2z0dkgK4YMzvURALQ==} + + moo@0.5.2: + resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==} ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} @@ -2796,6 +2954,10 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + nearley@2.20.1: + resolution: {integrity: sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==} + hasBin: true + nested-error-stacks@2.1.1: resolution: {integrity: sha512-9iN1ka/9zmX1ZvLV9ewJYEk9h7RyRRtqdK0woXcqohu8EWIerfPUjYJPg0ULy0UqP7cslmdGc8xKDJcojlKiaw==} @@ -2817,8 +2979,8 @@ packages: nullthrows@1.1.1: resolution: {integrity: sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw==} - nwsapi@2.2.13: - resolution: {integrity: sha512-cTGB9ptp9dY9A5VbMSe7fQBcl/tt22Vcqdq8+eN93rblOuE0aCFu4aZ2vMwct/2t+lFnosm8RkQW1I0Omb1UtQ==} + nwsapi@2.2.12: + resolution: {integrity: sha512-qXDmcVlZV4XRtKFzddidpfVP4oMSGhga+xdMc25mv8kaLUHtgzCDhUxkrN8exkGdTlLNaXj7CV3GtON7zuGZ+w==} object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} @@ -2851,6 +3013,9 @@ packages: once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + oniguruma-to-js@0.4.3: + resolution: {integrity: sha512-X0jWUcAlxORhOqqBREgPMgnshB7ZGYszBNspP+tS9hPD3l13CdaXcHbgImoHUHlrvGx/7AvFEkTRhAGYh+jzjQ==} + only-allow@1.2.1: resolution: {integrity: sha512-M7CJbmv7UCopc0neRKdzfoGWaVZC+xC1925GitKH9EAqYFzX9//25Q7oX4+jw0tiCCj+t5l6VZh8UPH23NZkMA==} hasBin: true @@ -2988,6 +3153,10 @@ packages: property-information@6.5.0: resolution: {integrity: sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==} + punycode.js@2.3.1: + resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} + engines: {node: '>=6'} + punycode@2.3.1: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} @@ -2995,6 +3164,13 @@ packages: queue-microtask@1.2.3: resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + railroad-diagrams@1.0.0: + resolution: {integrity: sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A==} + + randexp@0.4.6: + resolution: {integrity: sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ==} + engines: {node: '>=0.12'} + react-composer@5.0.3: resolution: {integrity: sha512-1uWd07EME6XZvMfapwZmc7NgCZqDemcvicRi3wMJzXsQLvZ3L7fTHVyPy1bZdnWXM4iPjYuNE+uJ41MLKeTtnA==} peerDependencies: @@ -3107,6 +3283,9 @@ packages: regenerator-runtime@0.14.1: resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} + regex@4.3.3: + resolution: {integrity: 
sha512-r/AadFO7owAq1QJVeZ/nq9jNS1vyZt+6t1p/E59B56Rn2GCya+gr1KSyOzNL/er+r+B7phv5jG2xU2Nz1YkmJg==} + regexp.prototype.flags@1.5.2: resolution: {integrity: sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==} engines: {node: '>= 0.4'} @@ -3154,6 +3333,10 @@ packages: resolution: {integrity: sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==} hasBin: true + ret@0.1.15: + resolution: {integrity: sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==} + engines: {node: '>=0.12'} + reusify@1.0.4: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} @@ -3236,6 +3419,9 @@ packages: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} + shiki@1.22.0: + resolution: {integrity: sha512-/t5LlhNs+UOKQCYBtl5ZsH/Vclz73GIqT2yQsCBygr8L/ppTdmpL4w3kPLoZJbMKVWtoG77Ue1feOjZfDxvMkw==} + side-channel@1.0.6: resolution: {integrity: sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==} engines: {node: '>= 0.4'} @@ -3251,6 +3437,10 @@ packages: resolution: {integrity: sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==} engines: {node: '>=12'} + smtp-address-parser@1.0.10: + resolution: {integrity: sha512-Osg9LmvGeAG/hyao4mldbflLOkkr3a+h4m1lwKCK5U8M6ZAr7tdXEz/+/vr752TSGE4MNUlUl9cIK2cB8cgzXg==} + engines: {node: '>=0.10'} + source-map-js@1.2.0: resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==} engines: {node: '>=0.10.0'} @@ -3439,6 +3629,16 @@ packages: peerDependencies: typescript: '>=4.2.0' + tsconfck@3.1.4: + resolution: {integrity: sha512-kdqWFGVJqe+KGYvlSO9NIaWn9jT1Ny4oKVzAJsKii5eoE9snzTJzL4+MMVOMn+fikWGFmKEylcXL710V/kIPJQ==} + engines: {node: ^18 || >=20} + hasBin: true + peerDependencies: + typescript: ^5.0.0 + peerDependenciesMeta: + typescript: + optional: true + tslib@2.6.3: resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} @@ -3477,6 +3677,9 @@ packages: ua-parser-js@1.0.38: resolution: {integrity: sha512-Aq5ppTOfvrCMgAPneW1HfWj66Xi7XL+/mIy996R1/CLS/rcyJQm6QZdsKrUeivDFQ+Oc9Wyuwor8Ze8peEoUoQ==} + uc.micro@2.1.0: + resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + unbox-primitive@1.0.2: resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} @@ -3537,6 +3740,9 @@ packages: resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} hasBin: true + valid-url@1.0.9: + resolution: {integrity: sha512-QQDsV8OnSf5Uc30CKSwG9lnhMPe6exHtTXLRYX8uMwKENy640pU+2BgBL0LRbDh/eYRahNCS7aewCx0wf3NYVA==} + vfile-message@4.0.2: resolution: {integrity: sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==} @@ -3557,6 +3763,14 @@ packages: babel-plugin-relay: '>=14.1.0' vite: '>=2.0.0' + vite-tsconfig-paths@4.3.2: + resolution: {integrity: sha512-0Vd/a6po6Q+86rPlntHye7F31zA2URZMbH8M3saAZ/xR9QoGN/L21bxEGfXdWmFdNkqPpRdxFT7nmNe12e9/uA==} + peerDependencies: + vite: '*' + peerDependenciesMeta: + vite: + optional: true + vite@5.3.6: resolution: 
{integrity: sha512-es78AlrylO8mTVBygC0gTC0FENv0C6T496vvd33ydbjF/mIi9q3XQ9A3NWo5qLGFKywvz10J26813OkLvcQleA==} engines: {node: ^18.0.0 || >=20.0.0} @@ -3585,6 +3799,11 @@ packages: terser: optional: true + vitest-canvas-mock@0.3.3: + resolution: {integrity: sha512-3P968tYBpqYyzzOaVtqnmYjqbe13576/fkjbDEJSfQAkHtC5/UjuRHOhFEN/ZV5HVZIkaROBUWgazDKJ+Ibw+Q==} + peerDependencies: + vitest: '*' + vitest@2.1.2: resolution: {integrity: sha512-veNjLizOMkRrJ6xxb+pvxN6/QAWg95mzcRjtmkepXdN87FNfxAss9RKe2far/G9cQpipfgP2taqg0KiWsquj8A==} engines: {node: ^18.0.0 || >=20.0.0} @@ -3715,6 +3934,11 @@ packages: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} + yaml@2.6.0: + resolution: {integrity: sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ==} + engines: {node: '>= 14'} + hasBin: true + yargs-parser@21.1.1: resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} engines: {node: '>=12'} @@ -3727,6 +3951,11 @@ packages: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} + zod-to-json-schema@3.23.3: + resolution: {integrity: sha512-TYWChTxKQbRJp5ST22o/Irt9KC5nj7CdBKYB/AosCRdj/wxEMvv4NNaj9XVUHDOIp53ZxArGhnw5HMZziPFjog==} + peerDependencies: + zod: ^3.23.3 + zod-validation-error@3.4.0: resolution: {integrity: sha512-ZOPR9SVY6Pb2qqO5XHt+MkkTRxGXb4EVtnjc9JpXUOtUB1T9Ru7mZOT361AN3MsetVe7R0a1KZshJDZdgp9miQ==} engines: {node: '>=18.0.0'} @@ -3770,7 +3999,7 @@ snapshots: '@jridgewell/gen-mapping': 0.3.5 '@jridgewell/trace-mapping': 0.3.25 - '@arizeai/components@1.8.1(@types/react@18.3.10)(eslint@8.57.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@arizeai/components@1.8.7(@types/react@18.3.10)(eslint@8.57.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@emotion/react': 11.11.4(@types/react@18.3.10)(react@18.3.1) '@react-aria/breadcrumbs': 3.5.13(react@18.3.1) @@ -3833,29 +4062,22 @@ snapshots: dependencies: '@ampproject/remapping': 2.3.0 '@babel/code-frame': 7.24.7 - '@babel/generator': 7.24.10 + '@babel/generator': 7.25.6 '@babel/helper-compilation-targets': 7.24.8 '@babel/helper-module-transforms': 7.24.9(@babel/core@7.24.9) '@babel/helpers': 7.24.8 - '@babel/parser': 7.24.8 - '@babel/template': 7.24.7 - '@babel/traverse': 7.24.8 - '@babel/types': 7.24.9 + '@babel/parser': 7.25.6 + '@babel/template': 7.25.0 + '@babel/traverse': 7.25.6 + '@babel/types': 7.25.6 convert-source-map: 2.0.0 - debug: 4.3.5 + debug: 4.3.7 gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 transitivePeerDependencies: - supports-color - '@babel/generator@7.24.10': - dependencies: - '@babel/types': 7.24.9 - '@jridgewell/gen-mapping': 0.3.5 - '@jridgewell/trace-mapping': 0.3.25 - jsesc: 2.5.2 - '@babel/generator@7.25.6': dependencies: '@babel/types': 7.25.6 @@ -3865,7 +4087,7 @@ snapshots: '@babel/helper-annotate-as-pure@7.24.7': dependencies: - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 '@babel/helper-compilation-targets@7.24.8': dependencies: @@ -3890,28 +4112,19 @@ snapshots: '@babel/helper-environment-visitor@7.24.7': dependencies: - '@babel/types': 7.24.9 - - '@babel/helper-function-name@7.24.7': - dependencies: - '@babel/template': 7.24.7 - '@babel/types': 7.24.9 - - '@babel/helper-hoist-variables@7.24.7': - dependencies: - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 
'@babel/helper-member-expression-to-functions@7.24.8': dependencies: '@babel/traverse': 7.25.6 - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 transitivePeerDependencies: - supports-color '@babel/helper-module-imports@7.24.7': dependencies: - '@babel/traverse': 7.24.8 - '@babel/types': 7.24.9 + '@babel/traverse': 7.25.6 + '@babel/types': 7.25.6 transitivePeerDependencies: - supports-color @@ -3928,7 +4141,7 @@ snapshots: '@babel/helper-optimise-call-expression@7.24.7': dependencies: - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 '@babel/helper-plugin-utils@7.24.8': {} @@ -3943,21 +4156,21 @@ snapshots: '@babel/helper-simple-access@7.24.7': dependencies: - '@babel/traverse': 7.24.8 - '@babel/types': 7.24.9 + '@babel/traverse': 7.25.6 + '@babel/types': 7.25.6 transitivePeerDependencies: - supports-color '@babel/helper-skip-transparent-expression-wrappers@7.24.7': dependencies: '@babel/traverse': 7.25.6 - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 transitivePeerDependencies: - supports-color '@babel/helper-split-export-declaration@7.24.7': dependencies: - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 '@babel/helper-string-parser@7.24.8': {} @@ -3967,8 +4180,8 @@ snapshots: '@babel/helpers@7.24.8': dependencies: - '@babel/template': 7.24.7 - '@babel/types': 7.24.9 + '@babel/template': 7.25.0 + '@babel/types': 7.25.6 '@babel/highlight@7.24.7': dependencies: @@ -3977,10 +4190,6 @@ snapshots: js-tokens: 4.0.0 picocolors: 1.0.1 - '@babel/parser@7.24.8': - dependencies: - '@babel/types': 7.24.9 - '@babel/parser@7.25.6': dependencies: '@babel/types': 7.25.6 @@ -4007,33 +4216,12 @@ snapshots: dependencies: regenerator-runtime: 0.14.1 - '@babel/template@7.24.7': - dependencies: - '@babel/code-frame': 7.24.7 - '@babel/parser': 7.24.8 - '@babel/types': 7.24.9 - '@babel/template@7.25.0': dependencies: '@babel/code-frame': 7.24.7 '@babel/parser': 7.25.6 '@babel/types': 7.25.6 - '@babel/traverse@7.24.8': - dependencies: - '@babel/code-frame': 7.24.7 - '@babel/generator': 7.24.10 - '@babel/helper-environment-visitor': 7.24.7 - '@babel/helper-function-name': 7.24.7 - '@babel/helper-hoist-variables': 7.24.7 - '@babel/helper-split-export-declaration': 7.24.7 - '@babel/parser': 7.24.8 - '@babel/types': 7.24.9 - debug: 4.3.5 - globals: 11.12.0 - transitivePeerDependencies: - - supports-color - '@babel/traverse@7.25.6': dependencies: '@babel/code-frame': 7.24.7 @@ -4041,62 +4229,60 @@ snapshots: '@babel/parser': 7.25.6 '@babel/template': 7.25.0 '@babel/types': 7.25.6 - debug: 4.3.5 + debug: 4.3.7 globals: 11.12.0 transitivePeerDependencies: - supports-color - '@babel/types@7.24.9': + '@babel/types@7.25.6': dependencies: '@babel/helper-string-parser': 7.24.8 '@babel/helper-validator-identifier': 7.24.7 to-fast-properties: 2.0.0 - '@babel/types@7.25.6': + '@changesets/changelog-github@0.4.8': dependencies: - '@babel/helper-string-parser': 7.24.8 - '@babel/helper-validator-identifier': 7.24.7 - to-fast-properties: 2.0.0 + '@changesets/get-github-info': 0.5.2 + '@changesets/types': 5.2.1 + dotenv: 8.6.0 + transitivePeerDependencies: + - encoding - '@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1)': + '@changesets/get-github-info@0.5.2': dependencies: - '@codemirror/language': 6.10.2 - '@codemirror/state': 6.4.1 - '@codemirror/view': 6.28.5 - '@lezer/common': 1.2.1 + dataloader: 1.4.0 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + + '@changesets/types@5.2.1': {} - 
'@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1)': + '@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1)': dependencies: - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@codemirror/state': 6.4.1 '@codemirror/view': 6.29.0 '@lezer/common': 1.2.1 - '@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.34.1)(@lezer/common@1.2.1)': + '@codemirror/autocomplete@6.18.1(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1)': dependencies: - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.34.1 + '@codemirror/view': 6.29.0 '@lezer/common': 1.2.1 + optional: true '@codemirror/commands@6.6.0': dependencies: - '@codemirror/language': 6.10.2 - '@codemirror/state': 6.4.1 - '@codemirror/view': 6.28.5 - '@lezer/common': 1.2.1 - - '@codemirror/commands@6.7.0': - dependencies: - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.34.1 + '@codemirror/view': 6.29.0 '@lezer/common': 1.2.1 '@codemirror/lang-javascript@6.2.2': dependencies: - '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) - '@codemirror/language': 6.10.2 + '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) + '@codemirror/language': 6.10.3 '@codemirror/lint': 6.8.1 '@codemirror/state': 6.4.1 '@codemirror/view': 6.29.0 @@ -4105,72 +4291,91 @@ snapshots: '@codemirror/lang-json@6.0.1': dependencies: - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@lezer/json': 1.0.2 - '@codemirror/lang-python@6.1.3(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1)': + '@codemirror/lang-python@6.1.3(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1)': dependencies: - '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1) - '@codemirror/language': 6.10.2 + '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) + '@codemirror/language': 6.10.3 '@lezer/python': 1.1.14 transitivePeerDependencies: - '@codemirror/state' - '@codemirror/view' - '@lezer/common' - '@codemirror/language@6.10.2': + '@codemirror/lang-yaml@6.1.1(@codemirror/view@6.29.0)': dependencies: + '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) + '@codemirror/language': 6.10.3 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.28.5 '@lezer/common': 1.2.1 - '@lezer/highlight': 1.2.0 - '@lezer/lr': 1.4.1 - style-mod: 4.1.2 + '@lezer/highlight': 1.2.1 + '@lezer/yaml': 1.0.3 + transitivePeerDependencies: + - '@codemirror/view' + optional: true - '@codemirror/lint@6.8.1': + '@codemirror/language@6.10.3': dependencies: '@codemirror/state': 6.4.1 - '@codemirror/view': 6.28.5 - crelt: 1.0.6 + '@codemirror/view': 6.29.0 + '@lezer/common': 1.2.1 + '@lezer/highlight': 1.2.1 + '@lezer/lr': 1.4.2 + style-mod: 4.1.2 - '@codemirror/lint@6.8.2': + '@codemirror/lint@6.8.1': dependencies: '@codemirror/state': 6.4.1 - '@codemirror/view': 6.34.1 + 
'@codemirror/view': 6.29.0 crelt: 1.0.6 '@codemirror/search@6.5.6': dependencies: '@codemirror/state': 6.4.1 - '@codemirror/view': 6.34.1 + '@codemirror/view': 6.29.0 crelt: 1.0.6 '@codemirror/state@6.4.1': {} '@codemirror/theme-one-dark@6.1.2': dependencies: - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.34.1 + '@codemirror/view': 6.29.0 '@lezer/highlight': 1.2.1 - '@codemirror/view@6.28.5': + '@codemirror/view@6.29.0': dependencies: '@codemirror/state': 6.4.1 style-mod: 4.1.2 w3c-keyname: 2.2.8 - '@codemirror/view@6.29.0': + '@dnd-kit/accessibility@3.1.0(react@18.3.1)': dependencies: - '@codemirror/state': 6.4.1 - style-mod: 4.1.2 - w3c-keyname: 2.2.8 + react: 18.3.1 + tslib: 2.6.3 - '@codemirror/view@6.34.1': + '@dnd-kit/core@6.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@codemirror/state': 6.4.1 - style-mod: 4.1.2 - w3c-keyname: 2.2.8 + '@dnd-kit/accessibility': 3.1.0(react@18.3.1) + '@dnd-kit/utilities': 3.2.2(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + tslib: 2.6.3 + + '@dnd-kit/sortable@8.0.0(@dnd-kit/core@6.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)': + dependencies: + '@dnd-kit/core': 6.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@dnd-kit/utilities': 3.2.2(react@18.3.1) + react: 18.3.1 + tslib: 2.6.3 + + '@dnd-kit/utilities@3.2.2(react@18.3.1)': + dependencies: + react: 18.3.1 + tslib: 2.6.3 '@emotion/babel-plugin@11.11.0': dependencies: @@ -4315,7 +4520,7 @@ snapshots: '@eslint/eslintrc@2.1.4': dependencies: ajv: 6.12.6 - debug: 4.3.5 + debug: 4.3.7 espree: 9.6.1 globals: 13.24.0 ignore: 5.3.1 @@ -4355,7 +4560,7 @@ snapshots: '@humanwhocodes/config-array@0.11.14': dependencies: '@humanwhocodes/object-schema': 2.0.3 - debug: 4.3.5 + debug: 4.3.7 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -4400,9 +4605,10 @@ snapshots: '@lezer/common@1.2.1': {} - '@lezer/highlight@1.2.0': + '@lezer/generator@1.7.1': dependencies: '@lezer/common': 1.2.1 + '@lezer/lr': 1.4.2 '@lezer/highlight@1.2.1': dependencies: @@ -4411,24 +4617,31 @@ snapshots: '@lezer/javascript@1.4.17': dependencies: '@lezer/common': 1.2.1 - '@lezer/highlight': 1.2.0 - '@lezer/lr': 1.4.1 + '@lezer/highlight': 1.2.1 + '@lezer/lr': 1.4.2 '@lezer/json@1.0.2': dependencies: '@lezer/common': 1.2.1 - '@lezer/highlight': 1.2.0 - '@lezer/lr': 1.4.1 + '@lezer/highlight': 1.2.1 + '@lezer/lr': 1.4.2 - '@lezer/lr@1.4.1': + '@lezer/lr@1.4.2': dependencies: '@lezer/common': 1.2.1 '@lezer/python@1.1.14': dependencies: '@lezer/common': 1.2.1 - '@lezer/highlight': 1.2.0 - '@lezer/lr': 1.4.1 + '@lezer/highlight': 1.2.1 + '@lezer/lr': 1.4.2 + + '@lezer/yaml@1.0.3': + dependencies: + '@lezer/common': 1.2.1 + '@lezer/highlight': 1.2.1 + '@lezer/lr': 1.4.2 + optional: true '@mediapipe/tasks-vision@0.10.8': {} @@ -5109,6 +5322,45 @@ snapshots: '@rollup/rollup-win32-x64-msvc@4.24.0': optional: true + '@sagold/json-pointer@5.1.2': {} + + '@sagold/json-query@6.2.0': + dependencies: + '@sagold/json-pointer': 5.1.2 + ebnf: 1.9.1 + + '@shikijs/core@1.22.0': + dependencies: + '@shikijs/engine-javascript': 1.22.0 + '@shikijs/engine-oniguruma': 1.22.0 + '@shikijs/types': 1.22.0 + '@shikijs/vscode-textmate': 9.3.0 + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.3 + + '@shikijs/engine-javascript@1.22.0': + dependencies: + '@shikijs/types': 1.22.0 + '@shikijs/vscode-textmate': 9.3.0 + oniguruma-to-js: 0.4.3 + + '@shikijs/engine-oniguruma@1.22.0': + dependencies: + '@shikijs/types': 1.22.0 + 
'@shikijs/vscode-textmate': 9.3.0 + + '@shikijs/markdown-it@1.22.0': + dependencies: + markdown-it: 14.1.0 + shiki: 1.22.0 + + '@shikijs/types@1.22.0': + dependencies: + '@shikijs/vscode-textmate': 9.3.0 + '@types/hast': 3.0.4 + + '@shikijs/vscode-textmate@9.3.0': {} + '@swc/helpers@0.5.12': dependencies: tslib: 2.6.3 @@ -5125,24 +5377,24 @@ snapshots: '@types/babel__core@7.20.5': dependencies: - '@babel/parser': 7.24.8 - '@babel/types': 7.24.9 + '@babel/parser': 7.25.6 + '@babel/types': 7.25.6 '@types/babel__generator': 7.6.8 '@types/babel__template': 7.4.4 '@types/babel__traverse': 7.20.6 '@types/babel__generator@7.6.8': dependencies: - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 '@types/babel__template@7.4.4': dependencies: - '@babel/parser': 7.24.8 - '@babel/types': 7.24.9 + '@babel/parser': 7.25.6 + '@babel/types': 7.25.6 '@types/babel__traverse@7.20.6': dependencies: - '@babel/types': 7.24.9 + '@babel/types': 7.25.6 '@types/d3-array@3.2.1': {} @@ -5188,9 +5440,7 @@ snapshots: '@types/estree-jsx@1.0.5': dependencies: - '@types/estree': 1.0.5 - - '@types/estree@1.0.5': {} + '@types/estree': 1.0.6 '@types/estree@1.0.6': {} @@ -5198,6 +5448,8 @@ snapshots: dependencies: '@types/unist': 3.0.2 + '@types/json-schema@7.0.15': {} + '@types/lodash@4.17.7': {} '@types/mdast@4.0.4': @@ -5206,6 +5458,10 @@ snapshots: '@types/ms@0.7.34': {} + '@types/node@20.16.11': + dependencies: + undici-types: 6.19.8 + '@types/node@22.5.4': dependencies: undici-types: 6.19.8 @@ -5214,7 +5470,7 @@ snapshots: '@types/parse-json@4.0.2': {} - '@types/prop-types@15.7.12': {} + '@types/prop-types@15.7.13': {} '@types/react-dom@18.3.0': dependencies: @@ -5231,7 +5487,7 @@ snapshots: '@types/react@18.3.10': dependencies: - '@types/prop-types': 15.7.12 + '@types/prop-types': 15.7.13 csstype: 3.1.3 '@types/recharts@1.8.29': @@ -5285,7 +5541,7 @@ snapshots: '@typescript-eslint/types': 7.16.1 '@typescript-eslint/typescript-estree': 7.16.1(typescript@5.4.5) '@typescript-eslint/visitor-keys': 7.16.1 - debug: 4.3.5 + debug: 4.3.7 eslint: 8.57.0 optionalDependencies: typescript: 5.4.5 @@ -5301,7 +5557,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 7.16.1(typescript@5.4.5) '@typescript-eslint/utils': 7.16.1(eslint@8.57.0)(typescript@5.4.5) - debug: 4.3.5 + debug: 4.3.7 eslint: 8.57.0 ts-api-utils: 1.3.0(typescript@5.4.5) optionalDependencies: @@ -5315,7 +5571,7 @@ snapshots: dependencies: '@typescript-eslint/types': 7.16.1 '@typescript-eslint/visitor-keys': 7.16.1 - debug: 4.3.5 + debug: 4.3.7 globby: 11.1.0 is-glob: 4.0.3 minimatch: 9.0.5 @@ -5342,38 +5598,52 @@ snapshots: '@typescript-eslint/types': 7.16.1 eslint-visitor-keys: 3.4.3 - '@uiw/codemirror-extensions-basic-setup@4.23.0(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1))(@codemirror/commands@6.6.0)(@codemirror/language@6.10.2)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)': + '@uiw/codemirror-extensions-basic-setup@4.23.0(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1))(@codemirror/commands@6.6.0)(@codemirror/language@6.10.3)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)': dependencies: - '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1) + '@codemirror/autocomplete': 
6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) '@codemirror/commands': 6.6.0 - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@codemirror/lint': 6.8.1 '@codemirror/search': 6.5.6 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.28.5 + '@codemirror/view': 6.29.0 + + '@uiw/codemirror-theme-github@4.23.5(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)': + dependencies: + '@uiw/codemirror-themes': 4.23.5(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0) + transitivePeerDependencies: + - '@codemirror/language' + - '@codemirror/state' + - '@codemirror/view' - '@uiw/codemirror-theme-nord@4.23.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)': + '@uiw/codemirror-theme-nord@4.23.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)': dependencies: - '@uiw/codemirror-themes': 4.23.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5) + '@uiw/codemirror-themes': 4.23.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0) transitivePeerDependencies: - '@codemirror/language' - '@codemirror/state' - '@codemirror/view' - '@uiw/codemirror-themes@4.23.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)': + '@uiw/codemirror-themes@4.23.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)': + dependencies: + '@codemirror/language': 6.10.3 + '@codemirror/state': 6.4.1 + '@codemirror/view': 6.29.0 + + '@uiw/codemirror-themes@4.23.5(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)': dependencies: - '@codemirror/language': 6.10.2 + '@codemirror/language': 6.10.3 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.28.5 + '@codemirror/view': 6.29.0 - '@uiw/react-codemirror@4.23.0(@babel/runtime@7.24.8)(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1))(@codemirror/language@6.10.2)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/theme-one-dark@6.1.2)(@codemirror/view@6.28.5)(codemirror@6.0.1(@lezer/common@1.2.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@uiw/react-codemirror@4.23.0(@babel/runtime@7.24.8)(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1))(@codemirror/language@6.10.3)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/theme-one-dark@6.1.2)(@codemirror/view@6.29.0)(codemirror@6.0.1(@lezer/common@1.2.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@babel/runtime': 7.24.8 '@codemirror/commands': 6.6.0 '@codemirror/state': 6.4.1 '@codemirror/theme-one-dark': 6.1.2 - '@codemirror/view': 6.28.5 - '@uiw/codemirror-extensions-basic-setup': 4.23.0(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5)(@lezer/common@1.2.1))(@codemirror/commands@6.6.0)(@codemirror/language@6.10.2)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/view@6.28.5) + '@codemirror/view': 6.29.0 + '@uiw/codemirror-extensions-basic-setup': 
4.23.0(@codemirror/autocomplete@6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1))(@codemirror/commands@6.6.0)(@codemirror/language@6.10.3)(@codemirror/lint@6.8.1)(@codemirror/search@6.5.6)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0) codemirror: 6.0.1(@lezer/common@1.2.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -5572,7 +5842,7 @@ snapshots: cosmiconfig: 7.1.0 resolve: 1.22.8 - babel-plugin-relay@17.0.0: + babel-plugin-relay@18.1.0: dependencies: babel-plugin-macros: 2.8.0 cosmiconfig: 5.2.1 @@ -5679,15 +5949,55 @@ snapshots: clsx@2.1.1: {} + codemirror-json-schema@0.7.8(@codemirror/language@6.10.3)(@codemirror/lint@6.8.1)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1)(typescript@5.4.5)(vite@5.3.6(@types/node@22.5.4)): + dependencies: + '@changesets/changelog-github': 0.4.8 + '@codemirror/language': 6.10.3 + '@codemirror/lint': 6.8.1 + '@codemirror/state': 6.4.1 + '@codemirror/view': 6.29.0 + '@lezer/common': 1.2.1 + '@sagold/json-pointer': 5.1.2 + '@shikijs/markdown-it': 1.22.0 + '@types/json-schema': 7.0.15 + '@types/node': 20.16.11 + json-schema: 0.4.0 + json-schema-library: 9.3.5 + markdown-it: 14.1.0 + vite-tsconfig-paths: 4.3.2(typescript@5.4.5)(vite@5.3.6(@types/node@22.5.4)) + yaml: 2.6.0 + optionalDependencies: + '@codemirror/autocomplete': 6.18.1(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) + '@codemirror/lang-json': 6.0.1 + '@codemirror/lang-yaml': 6.1.1(@codemirror/view@6.29.0) + codemirror-json5: 1.0.3 + json5: 2.2.3 + transitivePeerDependencies: + - encoding + - supports-color + - typescript + - vite + + codemirror-json5@1.0.3: + dependencies: + '@codemirror/language': 6.10.3 + '@codemirror/state': 6.4.1 + '@codemirror/view': 6.29.0 + '@lezer/common': 1.2.1 + '@lezer/highlight': 1.2.1 + json5: 2.2.3 + lezer-json5: 2.0.2 + optional: true + codemirror@6.0.1(@lezer/common@1.2.1): dependencies: - '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.2)(@codemirror/state@6.4.1)(@codemirror/view@6.34.1)(@lezer/common@1.2.1) - '@codemirror/commands': 6.7.0 - '@codemirror/language': 6.10.2 - '@codemirror/lint': 6.8.2 + '@codemirror/autocomplete': 6.12.0(@codemirror/language@6.10.3)(@codemirror/state@6.4.1)(@codemirror/view@6.29.0)(@lezer/common@1.2.1) + '@codemirror/commands': 6.6.0 + '@codemirror/language': 6.10.3 + '@codemirror/lint': 6.8.1 '@codemirror/search': 6.5.6 '@codemirror/state': 6.4.1 - '@codemirror/view': 6.34.1 + '@codemirror/view': 6.29.0 transitivePeerDependencies: - '@lezer/common' @@ -5709,6 +6019,8 @@ snapshots: comma-separated-tokens@2.0.3: {} + commander@2.20.3: {} + concat-map@0.0.1: {} convert-source-map@1.9.0: {} @@ -5782,6 +6094,8 @@ snapshots: shebang-command: 2.0.0 which: 2.0.2 + cssfontparser@1.2.1: {} + cssstyle@4.1.0: dependencies: rrweb-cssom: 0.7.1 @@ -5854,14 +6168,12 @@ snapshots: es-errors: 1.3.0 is-data-view: 1.0.1 + dataloader@1.4.0: {} + date-fns@3.6.0: {} debounce@1.2.1: {} - debug@4.3.5: - dependencies: - ms: 2.1.2 - debug@4.3.7: dependencies: ms: 2.1.3 @@ -5878,6 +6190,8 @@ snapshots: deep-is@0.1.4: {} + deepmerge@4.3.1: {} + define-data-property@1.1.4: dependencies: es-define-property: 1.0.0 @@ -5908,6 +6222,8 @@ snapshots: dependencies: path-type: 4.0.0 + discontinuous-range@1.0.0: {} + doctrine@2.1.0: dependencies: esutils: 2.0.3 @@ -5921,8 +6237,12 @@ snapshots: '@babel/runtime': 7.24.8 csstype: 3.1.3 + dotenv@8.6.0: {} + draco3d@1.5.7: {} + ebnf@1.9.1: {} + 
electron-to-chromium@1.4.829: {} emoji-regex@8.0.0: {} @@ -6066,7 +6386,7 @@ snapshots: eslint-plugin-react-compiler@0.0.0-experimental-42acc6a-20241001(eslint@8.57.0): dependencies: '@babel/core': 7.24.9 - '@babel/parser': 7.24.8 + '@babel/parser': 7.25.6 '@babel/plugin-proposal-private-methods': 7.18.6(@babel/core@7.24.9) eslint: 8.57.0 hermes-parser: 0.20.1 @@ -6126,7 +6446,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.5 + debug: 4.3.7 doctrine: 3.0.0 escape-string-regexp: 4.0.0 eslint-scope: 7.2.2 @@ -6186,6 +6506,8 @@ snapshots: extend@3.0.2: {} + fast-copy@3.0.2: {} + fast-deep-equal@3.1.3: {} fast-equals@5.0.1: {} @@ -6251,7 +6573,7 @@ snapshots: dependencies: is-callable: 1.2.7 - form-data@4.0.1: + form-data@4.0.0: dependencies: asynckit: 0.4.0 combined-stream: 1.0.8 @@ -6339,6 +6661,8 @@ snapshots: merge2: 1.4.1 slash: 4.0.0 + globrex@0.1.2: {} + glsl-noise@0.0.0: {} gopd@1.0.1: @@ -6349,6 +6673,10 @@ snapshots: graphemer@1.4.0: {} + graphql-ws@5.16.0(graphql@16.9.0): + dependencies: + graphql: 16.9.0 + graphql@15.3.0: {} graphql@16.9.0: {} @@ -6375,9 +6703,23 @@ snapshots: dependencies: function-bind: 1.1.2 + hast-util-to-html@9.0.3: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.2 + ccount: 2.0.1 + comma-separated-tokens: 2.0.3 + hast-util-whitespace: 3.0.0 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.0 + property-information: 6.5.0 + space-separated-tokens: 2.0.2 + stringify-entities: 4.0.4 + zwitch: 2.0.4 + hast-util-to-jsx-runtime@2.3.0: dependencies: - '@types/estree': 1.0.5 + '@types/estree': 1.0.6 '@types/hast': 3.0.4 '@types/unist': 3.0.2 comma-separated-tokens: 2.0.3 @@ -6417,6 +6759,8 @@ snapshots: html-url-attributes@3.0.0: {} + html-void-elements@3.0.0: {} + http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.1 @@ -6614,6 +6958,11 @@ snapshots: reflect.getprototypeof: 1.0.6 set-function-name: 2.0.2 + jest-canvas-mock@2.5.2: + dependencies: + cssfontparser: 1.2.1 + moo-color: 1.0.3 + js-tokens@4.0.0: {} js-yaml@3.14.1: @@ -6630,12 +6979,12 @@ snapshots: cssstyle: 4.1.0 data-urls: 5.0.0 decimal.js: 10.4.3 - form-data: 4.0.1 + form-data: 4.0.0 html-encoding-sniffer: 4.0.0 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.5 is-potential-custom-element-name: 1.0.1 - nwsapi: 2.2.13 + nwsapi: 2.2.12 parse5: 7.1.2 rrweb-cssom: 0.7.1 saxes: 6.0.0 @@ -6661,8 +7010,20 @@ snapshots: json-parse-even-better-errors@2.3.1: {} + json-schema-library@9.3.5: + dependencies: + '@sagold/json-pointer': 5.1.2 + '@sagold/json-query': 6.2.0 + deepmerge: 4.3.1 + fast-copy: 3.0.2 + fast-deep-equal: 3.1.3 + smtp-address-parser: 1.0.10 + valid-url: 1.0.9 + json-schema-traverse@0.4.1: {} + json-schema@0.4.0: {} + json-stable-stringify-without-jsonify@1.0.1: {} json5@2.2.3: {} @@ -6685,12 +7046,21 @@ snapshots: prelude-ls: 1.2.1 type-check: 0.4.0 + lezer-json5@2.0.2: + dependencies: + '@lezer/lr': 1.4.2 + optional: true + lie@3.3.0: dependencies: immediate: 3.0.6 lines-and-columns@1.2.4: {} + linkify-it@5.0.0: + dependencies: + uc.micro: 2.1.0 + locate-path@6.0.0: dependencies: p-locate: 5.0.0 @@ -6720,6 +7090,15 @@ snapshots: dependencies: '@jridgewell/sourcemap-codec': 1.5.0 + markdown-it@14.1.0: + dependencies: + argparse: 2.0.1 + entities: 4.5.0 + linkify-it: 5.0.0 + mdurl: 2.0.0 + punycode.js: 2.3.1 + uc.micro: 2.1.0 + markdown-table@3.0.3: {} mdast-util-find-and-replace@3.0.1: @@ -6875,6 +7254,8 @@ snapshots: dependencies: '@types/mdast': 4.0.4 + mdurl@2.0.0: {} + meow@12.1.1: {} merge2@1.4.1: {} @@ -7057,7 +7438,7 @@ snapshots: 
micromark@4.0.0: dependencies: '@types/debug': 4.1.12 - debug: 4.3.5 + debug: 4.3.7 decode-named-character-reference: 1.0.2 devlop: 1.1.0 micromark-core-commonmark: 2.0.1 @@ -7097,7 +7478,11 @@ snapshots: mkdirp@0.3.5: {} - ms@2.1.2: {} + moo-color@1.0.3: + dependencies: + color-name: 1.1.4 + + moo@0.5.2: {} ms@2.1.3: {} @@ -7105,6 +7490,13 @@ snapshots: natural-compare@1.4.0: {} + nearley@2.20.1: + dependencies: + commander: 2.20.3 + moo: 0.5.2 + railroad-diagrams: 1.0.0 + randexp: 0.4.6 + nested-error-stacks@2.1.1: {} node-fetch@2.7.0: @@ -7117,7 +7509,7 @@ snapshots: nullthrows@1.1.1: {} - nwsapi@2.2.13: {} + nwsapi@2.2.12: {} object-assign@4.1.1: {} @@ -7155,6 +7547,10 @@ snapshots: dependencies: wrappy: 1.0.2 + oniguruma-to-js@0.4.3: + dependencies: + regex: 4.3.3 + only-allow@1.2.1: dependencies: which-pm-runs: 1.1.0 @@ -7290,10 +7686,19 @@ snapshots: property-information@6.5.0: {} + punycode.js@2.3.1: {} + punycode@2.3.1: {} queue-microtask@1.2.3: {} + railroad-diagrams@1.0.0: {} + + randexp@0.4.6: + dependencies: + discontinuous-range: 1.0.0 + ret: 0.1.15 + react-composer@5.0.3(react@18.3.1): dependencies: prop-types: 15.8.1 @@ -7432,6 +7837,8 @@ snapshots: regenerator-runtime@0.14.1: {} + regex@4.3.3: {} + regexp.prototype.flags@1.5.2: dependencies: call-bind: 1.0.7 @@ -7503,6 +7910,8 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 + ret@0.1.15: {} + reusify@1.0.4: {} rimraf@3.0.2: @@ -7605,6 +8014,15 @@ snapshots: shebang-regex@3.0.0: {} + shiki@1.22.0: + dependencies: + '@shikijs/core': 1.22.0 + '@shikijs/engine-javascript': 1.22.0 + '@shikijs/engine-oniguruma': 1.22.0 + '@shikijs/types': 1.22.0 + '@shikijs/vscode-textmate': 9.3.0 + '@types/hast': 3.0.4 + side-channel@1.0.6: dependencies: call-bind: 1.0.7 @@ -7618,6 +8036,10 @@ snapshots: slash@4.0.0: {} + smtp-address-parser@1.0.10: + dependencies: + nearley: 2.20.1 + source-map-js@1.2.0: {} source-map@0.5.7: {} @@ -7798,6 +8220,10 @@ snapshots: dependencies: typescript: 5.4.5 + tsconfck@3.1.4(typescript@5.4.5): + optionalDependencies: + typescript: 5.4.5 + tslib@2.6.3: {} tunnel-rat@0.1.2(@types/react@18.3.10)(react@18.3.1): @@ -7850,6 +8276,8 @@ snapshots: ua-parser-js@1.0.38: {} + uc.micro@2.1.0: {} + unbox-primitive@1.0.2: dependencies: call-bind: 1.0.7 @@ -7925,6 +8353,8 @@ snapshots: uuid@9.0.1: {} + valid-url@1.0.9: {} + vfile-message@4.0.2: dependencies: '@types/unist': 3.0.2 @@ -7969,13 +8399,24 @@ snapshots: - supports-color - terser - vite-plugin-relay@2.1.0(babel-plugin-relay@17.0.0)(vite@5.3.6(@types/node@22.5.4)): + vite-plugin-relay@2.1.0(babel-plugin-relay@18.1.0)(vite@5.3.6(@types/node@22.5.4)): dependencies: '@babel/core': 7.24.9 - babel-plugin-relay: 17.0.0 + babel-plugin-relay: 18.1.0 + vite: 5.3.6(@types/node@22.5.4) + transitivePeerDependencies: + - supports-color + + vite-tsconfig-paths@4.3.2(typescript@5.4.5)(vite@5.3.6(@types/node@22.5.4)): + dependencies: + debug: 4.3.7 + globrex: 0.1.2 + tsconfck: 3.1.4(typescript@5.4.5) + optionalDependencies: vite: 5.3.6(@types/node@22.5.4) transitivePeerDependencies: - supports-color + - typescript vite@5.3.6(@types/node@22.5.4): dependencies: @@ -7986,6 +8427,11 @@ snapshots: '@types/node': 22.5.4 fsevents: 2.3.3 + vitest-canvas-mock@0.3.3(vitest@2.1.2(@types/node@22.5.4)(jsdom@25.0.1)): + dependencies: + jest-canvas-mock: 2.5.2 + vitest: 2.1.2(@types/node@22.5.4)(jsdom@25.0.1) + vitest@2.1.2(@types/node@22.5.4)(jsdom@25.0.1): dependencies: '@vitest/expect': 2.1.2 @@ -8121,6 +8567,8 @@ snapshots: yaml@1.10.2: {} + yaml@2.6.0: {} + 
yargs-parser@21.1.1: {} yargs@17.7.2: @@ -8135,6 +8583,10 @@ snapshots: yocto-queue@0.1.0: {} + zod-to-json-schema@3.23.3(zod@3.23.8): + dependencies: + zod: 3.23.8 + zod-validation-error@3.4.0(zod@3.23.8): dependencies: zod: 3.23.8 diff --git a/app/schema.graphql b/app/schema.graphql index 9d3c39903d..41404e419a 100644 --- a/app/schema.graphql +++ b/app/schema.graphql @@ -65,6 +65,30 @@ enum AuthMethod { union Bin = NominalBin | IntervalBin | MissingValueBin +union ChatCompletionChunk = TextChunk | ToolCallChunk + +input ChatCompletionInput { + messages: [ChatCompletionMessageInput!]! + model: GenerativeModelInput! + invocationParameters: InvocationParameters! + tools: [JSON!] + apiKey: String = null +} + +input ChatCompletionMessageInput { + role: ChatCompletionMessageRole! + + """The content of the message as JSON to support text and tools""" + content: JSON! +} + +enum ChatCompletionMessageRole { + USER + SYSTEM + TOOL + AI +} + input ClearProjectInput { id: GlobalID! @@ -796,6 +820,11 @@ type ExportedFile { fileName: String! } +type FunctionCallChunk { + name: String! + arguments: String! +} + type Functionality { """Model inferences are available for analysis""" modelInferences: Boolean! @@ -804,6 +833,22 @@ type Functionality { tracing: Boolean! } +input GenerativeModelInput { + providerKey: GenerativeProviderKey! + name: String! +} + +type GenerativeProvider { + name: String! + key: GenerativeProviderKey! +} + +enum GenerativeProviderKey { + OPENAI + ANTHROPIC + AZURE_OPENAI +} + """ The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as `"4"`) or integer (such as `4`) input value will be accepted as an ID. """ @@ -859,6 +904,16 @@ type IntervalBin { range: NumericRange! } +input InvocationParameters { + temperature: Float + maxCompletionTokens: Int + maxTokens: Int + topP: Float + stop: [String!] + seed: Int + toolChoice: JSON +} + """ The `JSON` scalar type represents JSON values as specified by [ECMA-404](https://ecma-international.org/wp-content/uploads/ECMA-404_2nd_edition_december_2017.pdf). """ @@ -908,6 +963,10 @@ type Model { ): PerformanceTimeSeries! } +input ModelNamesInput { + providerKey: GenerativeProviderKey! +} + type Mutation { createSystemApiKey(input: CreateApiKeyInput!): CreateSystemApiKeyMutationPayload! createUserApiKey(input: CreateUserApiKeyInput!): CreateUserApiKeyMutationPayload! @@ -1100,6 +1159,8 @@ type PromptResponse { } type Query { + modelProviders: [GenerativeProvider!]! + modelNames(input: ModelNamesInput!): [String!]! users(first: Int = 50, last: Int, after: String, before: String): UserConnection! userRoles: [UserRole!]! userApiKeys: [UserApiKey!]! @@ -1372,6 +1433,10 @@ enum SpanStatusCode { UNSET } +type Subscription { + chatCompletion(input: ChatCompletionInput!): ChatCompletionChunk! +} + type SystemApiKey implements ApiKey & Node { """Name of the API key.""" name: String! @@ -1389,6 +1454,10 @@ type SystemApiKey implements ApiKey & Node { id: GlobalID! } +type TextChunk { + content: String! +} + input TimeRange { """The start of the time range""" start: DateTime! @@ -1406,6 +1475,11 @@ type TimeSeriesDataPoint { value: Float } +type ToolCallChunk { + id: String! + function: FunctionCallChunk! +} + type Trace implements Node { """The Globally Unique ID of this object""" id: GlobalID! 
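Editor's note (not part of the diff): the schema additions above expose a `chatCompletion` subscription that streams `TextChunk` and `ToolCallChunk` payloads. A minimal sketch of driving it directly over `graphql-ws` (the transport this change also wires into Relay below) might look like the following; the model name, prompt, and temperature are placeholder values, and the WebSocket URL mirrors the one hard-coded in `RelayEnvironment.ts`:

```ts
import { createClient } from "graphql-ws";

// Placeholder endpoint: matches the URL used by the Relay subscription client in this change.
const client = createClient({ url: "ws://localhost:6006/graphql" });

const dispose = client.subscribe(
  {
    query: /* GraphQL */ `
      subscription ChatCompletion($input: ChatCompletionInput!) {
        chatCompletion(input: $input) {
          __typename
          ... on TextChunk {
            content
          }
          ... on ToolCallChunk {
            id
            function {
              name
              arguments
            }
          }
        }
      }
    `,
    variables: {
      input: {
        // "gpt-4o" is an illustrative model name, not something this diff defines
        model: { providerKey: "OPENAI", name: "gpt-4o" },
        messages: [{ role: "USER", content: "Hello" }],
        invocationParameters: { temperature: 0.7 },
      },
    },
  },
  {
    next: (result) => {
      // result.data?.chatCompletion is either a TextChunk or a ToolCallChunk
      console.log(result.data?.chatCompletion);
    },
    error: (err) => {
      throw err;
    },
    complete: () => {},
  }
);

// Later: dispose() tears down the subscription.
```

In the app itself the same operation flows through the Relay `SubscribeFunction` added in `RelayEnvironment.ts` below.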
diff --git a/app/src/@types/generative.d.ts b/app/src/@types/generative.d.ts new file mode 100644 index 0000000000..ffe3285ea3 --- /dev/null +++ b/app/src/@types/generative.d.ts @@ -0,0 +1,17 @@ +declare type ModelProvider = "OPENAI" | "AZURE_OPENAI" | "ANTHROPIC"; + +/** + * The role of a chat message + */ +declare type ChatMessageRole = "user" | "system" | "ai" | "tool"; + +/** + * The tool picking mechanism for an LLM + * Either "auto", "required", "none", or a specific tool + * @see https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice + */ +declare type ToolChoice = + | "auto" + | "required" + | "none" + | { type: "function"; function: { name: string } }; diff --git a/app/src/App.tsx b/app/src/App.tsx index 6f462abba4..2c98a09883 100644 --- a/app/src/App.tsx +++ b/app/src/App.tsx @@ -4,6 +4,7 @@ import { ThemeProvider as EmotionThemeProvider } from "@emotion/react"; import { Provider, theme } from "@arizeai/components"; +import { CredentialsProvider } from "./contexts/CredentialsContext"; import { FeatureFlagsProvider } from "./contexts/FeatureFlagsContext"; import { FunctionalityProvider } from "./contexts/FunctionalityContext"; import { PreferencesProvider } from "./contexts/PreferencesContext"; @@ -33,11 +34,13 @@ export function AppContent() { - - - - - + + + + + + + diff --git a/app/src/RelayEnvironment.ts b/app/src/RelayEnvironment.ts index 6122d551e0..ee4dbe6a51 100644 --- a/app/src/RelayEnvironment.ts +++ b/app/src/RelayEnvironment.ts @@ -1,9 +1,15 @@ +import { createClient, Sink } from "graphql-ws"; import { Environment, FetchFunction, + GraphQLResponse, Network, + Observable, RecordSource, + RequestParameters, Store, + SubscribeFunction, + Variables, } from "relay-runtime"; import { authFetch } from "@phoenix/authFetch"; @@ -50,9 +56,29 @@ const fetchRelay: FetchFunction = async (params, variables, _cacheConfig) => { return json; }; +const wsClient = createClient({ + url: "ws://localhost:6006/graphql", +}); + +const subscribe: SubscribeFunction = ( + operation: RequestParameters, + variables: Variables +) => { + return Observable.create((sink) => { + return wsClient.subscribe( + { + operationName: operation.name, + query: operation.text as string, + variables, + }, + sink as Sink + ); + }); +}; + // Export a singleton instance of Relay Environment configured with our network layer: export default new Environment({ - network: Network.create(fetchRelay), + network: Network.create(fetchRelay, subscribe), store: new Store(new RecordSource(), { // This property tells Relay to not immediately clear its cache when the user // navigates around the app. 
Relay will hold onto the specified number of diff --git a/app/src/Routes.tsx b/app/src/Routes.tsx index d97d015dce..dc6c42057f 100644 --- a/app/src/Routes.tsx +++ b/app/src/Routes.tsx @@ -5,6 +5,7 @@ import { createBrowserRouter } from "react-router-dom"; import { datasetLoaderQuery$data } from "./pages/dataset/__generated__/datasetLoaderQuery.graphql"; import { embeddingLoaderQuery$data } from "./pages/embedding/__generated__/embeddingLoaderQuery.graphql"; import { Layout } from "./pages/Layout"; +import { spanPlaygroundPageLoaderQuery$data } from "./pages/playground/__generated__/spanPlaygroundPageLoaderQuery.graphql"; import { projectLoaderQuery$data } from "./pages/project/__generated__/projectLoaderQuery.graphql"; import { APIsPage, @@ -40,6 +41,8 @@ import { ResetPasswordPage, ResetPasswordWithTokenPage, SettingsPage, + SpanPlaygroundPage, + spanPlaygroundPageLoader, TracePage, TracingRoot, } from "./pages"; @@ -157,11 +160,25 @@ const router = createBrowserRouter( } handle={{ crumb: () => "Playground", }} - /> + > + } /> + } + loader={spanPlaygroundPageLoader} + handle={{ + crumb: (data: spanPlaygroundPageLoaderQuery$data) => { + if (data.span.__typename === "Span") { + return `span ${data.span.context.spanId}`; + } + return "span unknown"; + }, + }} + /> + } diff --git a/app/src/components/AlphabeticIndexIcon.tsx b/app/src/components/AlphabeticIndexIcon.tsx new file mode 100644 index 0000000000..a0d944cc41 --- /dev/null +++ b/app/src/components/AlphabeticIndexIcon.tsx @@ -0,0 +1,33 @@ +import React, { useMemo } from "react"; +import { schemeSet2 } from "d3-scale-chromatic"; +import { transparentize } from "polished"; +import { css } from "@emotion/react"; + +function indexToChar(index: number) { + // Wrap around using modulo if index exceeds 'C' + const charCode = 65 + index; // 'A' has ASCII code 65, 'B' is 66, 'C' is 67 + return String.fromCharCode(charCode); +} + +export function AlphabeticIndexIcon({ index }: { index: number }) { + const char = useMemo(() => indexToChar(index), [index]); + const color = useMemo(() => schemeSet2[index % 8], [index]); + const backgroundColor = useMemo(() => transparentize(0.8, color), [color]); + return ( +
+ {char} +
+ ); +} diff --git a/app/src/components/auth/OneTimeAPIKeyDialog.tsx b/app/src/components/auth/OneTimeAPIKeyDialog.tsx index 82e1d31c87..a85b3b86d9 100644 --- a/app/src/components/auth/OneTimeAPIKeyDialog.tsx +++ b/app/src/components/auth/OneTimeAPIKeyDialog.tsx @@ -37,7 +37,7 @@ export function OneTimeAPIKeyDialog(props: { jwt: string }) { - + diff --git a/app/src/components/code/CodeEditorFieldWrapper.tsx b/app/src/components/code/CodeEditorFieldWrapper.tsx index 438c8b3da5..3f890f651a 100644 --- a/app/src/components/code/CodeEditorFieldWrapper.tsx +++ b/app/src/components/code/CodeEditorFieldWrapper.tsx @@ -27,6 +27,7 @@ const codeEditorFormWrapperCSS = css` .cm-editor { border-radius: var(--ac-global-rounding-small); } + box-sizing: border-box; .cm-focused { outline: none; } diff --git a/app/src/components/code/CodeWrap.tsx b/app/src/components/code/CodeWrap.tsx index 6985057a93..a62a324150 100644 --- a/app/src/components/code/CodeWrap.tsx +++ b/app/src/components/code/CodeWrap.tsx @@ -1,10 +1,18 @@ import React, { ReactNode } from "react"; -import { View } from "@arizeai/components"; +import { View, ViewStyleProps } from "@arizeai/components"; -export function CodeWrap({ children }: { children: ReactNode }) { +export function CodeWrap({ + children, + ...props +}: { children: ReactNode } & ViewStyleProps) { return ( - + {children} ); diff --git a/app/src/components/code/JSONEditor.tsx b/app/src/components/code/JSONEditor.tsx index 68e7457fa3..a7459c8afa 100644 --- a/app/src/components/code/JSONEditor.tsx +++ b/app/src/components/code/JSONEditor.tsx @@ -1,24 +1,56 @@ -import React from "react"; -import { json, jsonParseLinter } from "@codemirror/lang-json"; +import React, { useMemo } from "react"; +import { json, jsonLanguage, jsonParseLinter } from "@codemirror/lang-json"; import { linter } from "@codemirror/lint"; -import { EditorView } from "@codemirror/view"; +import { EditorView, hoverTooltip } from "@codemirror/view"; +import { githubLight } from "@uiw/codemirror-theme-github"; import { nord } from "@uiw/codemirror-theme-nord"; import CodeMirror, { ReactCodeMirrorProps } from "@uiw/react-codemirror"; +import { + handleRefresh, + jsonCompletion, + jsonSchemaHover, + jsonSchemaLinter, + stateExtensions, +} from "codemirror-json-schema"; +import { JSONSchema7 } from "json-schema"; import { useTheme } from "@phoenix/contexts"; export type JSONEditorProps = Omit< ReactCodeMirrorProps, "theme" | "extensions" | "editable" ->; +> & { + /** + * JSON Schema to use for validation, if provided will enable JSON Schema validation with tooltips in the editor + */ + jsonSchema?: JSONSchema7; +}; export function JSONEditor(props: JSONEditorProps) { const { theme } = useTheme(); - const codeMirrorTheme = theme === "light" ? undefined : nord; + const codeMirrorTheme = theme === "light" ? 
githubLight : nord; + const extensions = useMemo(() => { + const baseExtensions = [ + json(), + EditorView.lineWrapping, + linter(jsonParseLinter()), + ]; + if (props.jsonSchema) { + baseExtensions.push( + linter(jsonSchemaLinter(), { needsRefresh: handleRefresh }), + jsonLanguage.data.of({ + autocomplete: jsonCompletion(), + }), + hoverTooltip(jsonSchemaHover()), + stateExtensions(props.jsonSchema) + ); + } + return baseExtensions; + }, [props.jsonSchema]); return ( ; + +function DragHandle( + { + listeners, + attributes, + }: { + listeners?: Listeners; + attributes: DraggableAttributes; + }, + ref: React.Ref +) { + return ( + + ); +} + +// Use Ref forwarding for DragHandle +const _DragHandle = React.forwardRef(DragHandle); +export { _DragHandle as DragHandle }; diff --git a/app/src/components/generative/ToolChoiceSelector.tsx b/app/src/components/generative/ToolChoiceSelector.tsx new file mode 100644 index 0000000000..32a0404fc3 --- /dev/null +++ b/app/src/components/generative/ToolChoiceSelector.tsx @@ -0,0 +1,102 @@ +import React from "react"; + +import { Flex, Item, Label, Picker } from "@arizeai/components"; + +type DefaultToolChoice = Extract; + +const isDefaultToolChoice = (choice: string): choice is DefaultToolChoice => { + return choice === "auto" || choice === "required" || choice === "none"; +}; + +/** + * A prefix to add to user defined tools in the picker to avoid picker key collisions with default {@link ToolChoice} keys + */ +const TOOL_NAME_PREFIX = "tool_"; + +/** + * Adds a prefix to user defined tool names to avoid conflicts picker key collisions with default {@link ToolChoice} keys + * @param toolName The name of a tool + * @returns The tool name with the "TOOL_NAME_PREFIX" prefix added + */ +const addToolNamePrefix = (toolName: string) => + `${TOOL_NAME_PREFIX}${toolName}`; + +/** + * Removes the "TOOL_NAME_PREFIX" prefix from a tool name so that it can be used as a choice that corresponds to an actual tool + * @param toolName The name of a tool with the "TOOL_NAME_PREFIX" prefix + * @returns The tool name with the "TOOL_NAME_PREFIX" prefix removed + */ +const removeToolNamePrefix = (toolName: string) => + toolName.startsWith(TOOL_NAME_PREFIX) + ? toolName.slice(TOOL_NAME_PREFIX.length) + : toolName; + +type ToolChoicePickerProps = { + /** + * The current choice including the default {@link ToolChoice} and any user defined tools + */ + choice: ToolChoice | undefined; + /** + * Callback for when the tool choice changes + */ + onChange: (choice: ToolChoice) => void; + /** + * A list of user defined tool names + */ + toolNames: string[]; +}; + +export function ToolChoicePicker({ + choice, + onChange, + toolNames, +}: ToolChoicePickerProps) { + const currentKey = + choice == null || typeof choice === "string" + ? 
choice + : addToolNamePrefix(choice.function.name); + return ( + { + if (typeof choice !== "string") { + return; + } + if (choice.startsWith(TOOL_NAME_PREFIX)) { + onChange({ + type: "function", + function: { + name: removeToolNamePrefix(choice), + }, + }); + } else if (isDefaultToolChoice(choice)) { + onChange(choice); + } + }} + > + {[ + + + Tools auto-selected by LLM + + , + + + Use at least one tool + + , + + + Don't use any tools + + , + // Add "TOOL_NAME_PREFIX" prefix to user defined tool names to avoid conflicts with default keys + ...toolNames.map((toolName) => ( + {toolName} + )), + ]} + + ); +} diff --git a/app/src/components/generative/index.ts b/app/src/components/generative/index.ts new file mode 100644 index 0000000000..ad6556baa2 --- /dev/null +++ b/app/src/components/generative/index.ts @@ -0,0 +1 @@ +export * from "./ToolChoiceSelector"; diff --git a/app/src/components/templateEditor/TemplateEditor.tsx b/app/src/components/templateEditor/TemplateEditor.tsx new file mode 100644 index 0000000000..2628940809 --- /dev/null +++ b/app/src/components/templateEditor/TemplateEditor.tsx @@ -0,0 +1,58 @@ +import React, { useMemo } from "react"; +import { githubLight } from "@uiw/codemirror-theme-github"; +import { nord } from "@uiw/codemirror-theme-nord"; +import CodeMirror, { + BasicSetupOptions, + ReactCodeMirrorProps, +} from "@uiw/react-codemirror"; + +import { useTheme } from "@phoenix/contexts"; +import { assertUnreachable } from "@phoenix/typeUtils"; + +import { FStringTemplating } from "./language/fString"; +import { MustacheLikeTemplating } from "./language/mustacheLike"; +import { TemplateLanguages } from "./constants"; +import { TemplateLanguage } from "./types"; + +type TemplateEditorProps = ReactCodeMirrorProps & { + templateLanguage: TemplateLanguage; +}; + +const basicSetupOptions: BasicSetupOptions = { + lineNumbers: false, + highlightActiveLine: false, + foldGutter: false, + highlightActiveLineGutter: false, + bracketMatching: false, +}; + +export const TemplateEditor = ({ + templateLanguage, + ...props +}: TemplateEditorProps) => { + const { theme } = useTheme(); + const codeMirrorTheme = theme === "light" ? 
githubLight : nord; + const extensions = useMemo(() => { + const ext: TemplateEditorProps["extensions"] = []; + switch (templateLanguage) { + case TemplateLanguages.FString: + ext.push(FStringTemplating()); + break; + case TemplateLanguages.Mustache: + ext.push(MustacheLikeTemplating()); + break; + default: + assertUnreachable(templateLanguage); + } + return ext; + }, [templateLanguage]); + + return ( + + ); +}; diff --git a/app/src/components/templateEditor/constants.ts b/app/src/components/templateEditor/constants.ts new file mode 100644 index 0000000000..803266784d --- /dev/null +++ b/app/src/components/templateEditor/constants.ts @@ -0,0 +1,15 @@ +/** + * Enum for the different template languages supported by the template editor + * + * - FString: `variables look like {variable}` + * - Mustache: `variables look like {{variable}}` + * + * @example + * ```tsx + * + * ``` + */ +export const TemplateLanguages = { + FString: "f-string", // {variable} + Mustache: "mustache", // {{variable}} +} as const; diff --git a/app/src/components/templateEditor/index.tsx b/app/src/components/templateEditor/index.tsx new file mode 100644 index 0000000000..e78d214853 --- /dev/null +++ b/app/src/components/templateEditor/index.tsx @@ -0,0 +1 @@ +export * from "./TemplateEditor"; diff --git a/app/src/components/templateEditor/language/__tests__/languageUtils.test.ts b/app/src/components/templateEditor/language/__tests__/languageUtils.test.ts new file mode 100644 index 0000000000..ad5b1c25b9 --- /dev/null +++ b/app/src/components/templateEditor/language/__tests__/languageUtils.test.ts @@ -0,0 +1,182 @@ +import { formatFString, FStringTemplatingLanguage } from "../fString"; +import { extractVariables } from "../languageUtils"; +import { + formatMustacheLike, + MustacheLikeTemplatingLanguage, +} from "../mustacheLike"; + +describe("language utils", () => { + it("should extract variable names from a mustache-like template", () => { + const tests = [ + { input: "{{name}}", expected: ["name"] }, + // TODO: add support for triple mustache escaping or at least use the inner most mustache as value + // { input: "{{name}} {{{age}}}", expected: ["name"] }, + { + input: + "Hi I'm {{name}} and I'm {{age}} years old and I live in {{city}}", + expected: ["name", "age", "city"], + }, + { + input: ` +hi there {{name}} +how are you? + +can you help with this json? + +{ "name": "John", "age": {{age}} }`, + expected: ["name", "age"], + }, + { + input: `{"name": "{{name}}", "age": {{age}}}`, + expected: ["name", "age"], + }, + { + input: `{"name": "\\{{name}}", "age": \\{{age}}}`, + expected: [], + }, + { + input: `{"name": "{{{name}}}"}`, + expected: ["{name}"], + }, + ] as const; + tests.forEach(({ input, expected }) => { + expect( + extractVariables({ + parser: MustacheLikeTemplatingLanguage.parser, + text: input, + }) + ).toEqual(expected); + }); + }); + + it("should extract variable names from a f-string template", () => { + const tests = [ + { input: "{name}", expected: ["name"] }, + { input: "{name} {age}", expected: ["name", "age"] }, + { input: "{name} {{age}}", expected: ["name"] }, + { + input: "Hi I'm {name} and I'm {age} years old and I live in {city}", + expected: ["name", "age", "city"], + }, + { + input: ` +hi there {name} +how are you? + +can you help with this json? 
+ +{{ "name": "John", "age": {age} }}`, + expected: ["name", "age"], + }, + { input: "\\{test}", expected: [] }, + ] as const; + tests.forEach(({ input, expected }) => { + expect( + extractVariables({ + parser: FStringTemplatingLanguage.parser, + text: input, + }) + ).toEqual(expected); + }); + }); + + it("should format a mustache-like template", () => { + const tests = [ + { + input: "{{name}}", + variables: { name: "John" }, + expected: "John", + }, + { + input: "Hi {{name}}, this is bad syntax {{}}", + variables: { name: "John", age: 30 }, + expected: "Hi John, this is bad syntax {{}}", + }, + { + input: "{{name}} {{age}}", + variables: { name: "John", age: 30 }, + expected: "John 30", + }, + { + input: "{{name}} {age} {{city}}", + variables: { name: "John", city: "New York" }, + expected: "John {age} New York", + }, + { + input: ` +hi there {{name}} +how are you? + +can you help with this json? + +{ "name": "John", "age": {{age}} }`, + variables: { name: "John", age: 30 }, + expected: ` +hi there John +how are you? + +can you help with this json? + +{ "name": "John", "age": 30 }`, + }, + { + input: `{"name": "{{name}}", "age": {{age}}}`, + variables: { name: "John", age: 30 }, + expected: `{"name": "John", "age": 30}`, + }, + { + input: `{"name": "\\{{name}}", "age": "{{age\\}}"}`, + variables: { name: "John", age: 30 }, + expected: `{"name": "{{name}}", "age": "{{age\\}}"}`, + }, + ] as const; + tests.forEach(({ input, variables, expected }) => { + expect(formatMustacheLike({ text: input, variables })).toEqual(expected); + }); + }); + + it("should format a f-string template", () => { + const tests = [ + { + input: "{name}", + variables: { name: "John" }, + expected: "John", + }, + { + input: "{name} {age}", + variables: { name: "John", age: 30 }, + expected: "John 30", + }, + { + input: "{name} {{age}}", + variables: { name: "John", age: 30 }, + expected: "John {age}", + }, + { + input: ` +hi there {name} +how are you? + +can you help with this json? + +{{ "name": "John", "age": {age} }}`, + variables: { name: "John", age: 30 }, + expected: ` +hi there John +how are you? + +can you help with this json? 
+ +{ "name": "John", "age": 30 }`, + }, + { + input: "\\{test\\}", + variables: { test: "value" }, + expected: "{test\\}", + }, + ] as const; + tests.forEach(({ input, variables, expected }) => { + expect(formatFString({ text: input, variables })).toEqual(expected); + }); + }); +}); diff --git a/app/src/components/templateEditor/language/fString/fStringTemplating.syntax.grammar b/app/src/components/templateEditor/language/fString/fStringTemplating.syntax.grammar new file mode 100644 index 0000000000..e5fc4c60a4 --- /dev/null +++ b/app/src/components/templateEditor/language/fString/fStringTemplating.syntax.grammar @@ -0,0 +1,34 @@ +// https://lezer.codemirror.net/docs/guide/ + +// the top level rule is the entry point for the parser +// these are the tokens that can appear in the top level of the tree +@top FStringTemplate {(Template | char | emptyTemplate | lEscape | sym )*} + +@skip {} { + // https://lezer.codemirror.net/docs/guide/#local-token-groups + // this rule uses local tokens so it must be defined + // inside of a skip block + Template { LBrace Variable+ RBrace } +} + +//https://lezer.codemirror.net/docs/guide/#tokens +// lowercase tokens are consumed by the parser but not included in the tree +// uppercase tokens are included in the tree +@tokens { + LBrace { "{" } + emptyTemplate { "{}" } + lEscape { "\\" "{" | "{{" } + sym { "{{" | "}}" | "\"" | "'" } + char { $[\n\r\t\u{20}\u{21}\u{23}-\u{5b}\u{5d}-\u{10ffff}] | "\\" esc } + esc { $["\\\/bfnrt] | "u" hex hex hex hex } + hex { $[0-9a-fA-F] } + @precedence { lEscape, LBrace, char, sym } +} + +// https://lezer.codemirror.net/docs/guide/#local-token-groups +// tokens that only exist in the context that they are used +// they only apply while inside the Template scope in this case +@local tokens { + RBrace { "}" } + Variable { ("\\" "}") | (![}])+ } +} diff --git a/app/src/components/templateEditor/language/fString/fStringTemplating.syntax.grammar.d.ts b/app/src/components/templateEditor/language/fString/fStringTemplating.syntax.grammar.d.ts new file mode 100644 index 0000000000..24c6a31539 --- /dev/null +++ b/app/src/components/templateEditor/language/fString/fStringTemplating.syntax.grammar.d.ts @@ -0,0 +1,3 @@ +import { LRParser } from "@lezer/lr"; + +export declare const parser: LRParser; diff --git a/app/src/components/templateEditor/language/fString/fStringTemplating.ts b/app/src/components/templateEditor/language/fString/fStringTemplating.ts new file mode 100644 index 0000000000..ec11fe8236 --- /dev/null +++ b/app/src/components/templateEditor/language/fString/fStringTemplating.ts @@ -0,0 +1,83 @@ +import { LanguageSupport, LRLanguage } from "@codemirror/language"; +import { styleTags, tags as t } from "@lezer/highlight"; + +import { extractVariables, format } from "../languageUtils"; + +import { parser } from "./fStringTemplating.syntax.grammar"; + +/** + * Define the language for the FString templating system + * + * @see https://codemirror.net/examples/lang-package/ + * + * @example + * ``` + * {question} + * + * {{ + * "answer": {answer} + * }} + * ``` + * In this example, the variables are `question` and `answer`. + * Double braces are not considered as variables, and will be converted to a single brace on format. 
+ */ +export const FStringTemplatingLanguage = LRLanguage.define({ + parser: parser.configure({ + props: [ + // https://lezer.codemirror.net/docs/ref/#highlight.styleTags + styleTags({ + // style the opening brace of a template, not floating braces + "Template/LBrace": t.quote, + // style the closing brace of a template, not floating braces + "Template/RBrace": t.quote, + // style variables (stuff inside {}) + "Template/Variable": t.variableName, + // style invalid stuff, undefined tokens will be highlighted + "⚠": t.invalid, + }), + ], + }), + languageData: {}, +}); + +/** + * Generates a string representation of the parse tree of the given text + * + * Useful for debugging the parser + */ +export const debugParser = (text: string) => { + const tree = FStringTemplatingLanguage.parser.parse(text); + return tree.toString(); +}; + +/** + * Formats an FString template with the given variables. + */ +export const formatFString = ({ + text, + variables, +}: Omit[0], "parser" | "postFormat">) => + format({ + parser: FStringTemplatingLanguage.parser, + text, + variables, + postFormat: (text) => + text.replaceAll("\\{", "{").replaceAll("{{", "{").replaceAll("}}", "}"), + }); + +/** + * Extracts the variables from an FString template + */ +export const extractVariablesFromFString = (text: string) => { + return extractVariables({ + parser: FStringTemplatingLanguage.parser, + text, + }); +}; + +/** + * Creates a CodeMirror extension for the FString templating system + */ +export function FStringTemplating() { + return new LanguageSupport(FStringTemplatingLanguage); +} diff --git a/app/src/components/templateEditor/language/fString/index.ts b/app/src/components/templateEditor/language/fString/index.ts new file mode 100644 index 0000000000..0b8a84621b --- /dev/null +++ b/app/src/components/templateEditor/language/fString/index.ts @@ -0,0 +1 @@ +export * from "./fStringTemplating"; diff --git a/app/src/components/templateEditor/language/languageUtils.ts b/app/src/components/templateEditor/language/languageUtils.ts new file mode 100644 index 0000000000..c86c147af5 --- /dev/null +++ b/app/src/components/templateEditor/language/languageUtils.ts @@ -0,0 +1,95 @@ +import { LRParser } from "@lezer/lr"; + +/** + * Extracts all variables from a templated string. + * + * @returns An array of variable names. + */ +export const extractVariables = ({ + parser, + text, +}: { + /** + * The parser for the templating language. + * The parser should be a language parser that emits Variable nodes. + */ + parser: LRParser; + /** + * The text to extract variables from. + */ + text: string; +}) => { + const tree = parser.parse(text); + const variables: string[] = []; + const cur = tree.cursor(); + do { + if (cur.name === "Variable") { + variables.push(text.slice(cur.node.from, cur.node.to)); + } + } while (cur.next()); + return variables; +}; + +/** + * Formats a templated string with the given variables. + * + * The parser should be a language parser that emits Variable nodes as children of some parent node. + */ +export const format = ({ + parser, + text, + variables, + postFormat, +}: { + /** + * The parser for the templating language. + * + * Should be MustacheLikeTemplatingLanguage or FStringTemplatingLanguage. + * + * format assumes that the language produces a structure where Variable nodes + * are children of some parent node, in this case Template. + */ + parser: LRParser; + /** + * The text to format. + */ + text: string; + /** + * A mapping of variable names to their values. 
+ * + * If a variable is not found in this object, it will be left as is. + */ + variables: Record; + /** + * Runs after formatting the text but just before returning the result + * + * Useful for doing post-parse processing, like replacing double braces with single braces, + * or trimming whitespace. + */ + postFormat?: (text: string) => string; +}) => { + if (!text) return ""; + let result = text; + let tree = parser.parse(result); + let cur = tree.cursor(); + do { + if (cur.name === "Variable") { + // grab the content inside of the braces + const variable = result.slice(cur.node.from, cur.node.to); + // grab the position of the content including the braces + const Template = cur.node.parent!; + if (variable in variables) { + // replace the content (including braces) with the variable value + result = `${result.slice(0, Template.from)}${variables[variable]}${result.slice(Template.to)}`; + // reparse the result so that positions are updated + tree = parser.parse(result); + // reset the cursor to the start of the new tree + cur = tree.cursor(); + } + } + } while (cur.next()); + if (postFormat) { + result = postFormat(result); + } + return result; +}; diff --git a/app/src/components/templateEditor/language/mustacheLike/index.ts b/app/src/components/templateEditor/language/mustacheLike/index.ts new file mode 100644 index 0000000000..7c8f48d8b6 --- /dev/null +++ b/app/src/components/templateEditor/language/mustacheLike/index.ts @@ -0,0 +1 @@ +export * from "./mustacheLikeTemplating"; diff --git a/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.syntax.grammar b/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.syntax.grammar new file mode 100644 index 0000000000..dfcb7cf5c4 --- /dev/null +++ b/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.syntax.grammar @@ -0,0 +1,34 @@ +// https://lezer.codemirror.net/docs/guide/ + +// the top level rule is the entry point for the parser +// these are the tokens that can appear in the top level of the tree +@top MustacheLikeTemplate {(Template | char | emptyTemplate | lEscape | sym )*} + +@skip {} { + // https://lezer.codemirror.net/docs/guide/#local-token-groups + // this rule uses local tokens so it must be defined + // inside of a skip block + Template { LBrace Variable+ RBrace } +} + +//https://lezer.codemirror.net/docs/guide/#tokens +// lowercase tokens are consumed by the parser but not included in the tree +// uppercase tokens are included in the tree +@tokens { + LBrace { "{{" } + emptyTemplate { "{{}}" } + lEscape { "\\" "{" } + sym { "{" | "}" | "\"" | "'" } + char { $[\n\r\t\u{20}\u{21}\u{23}-\u{5b}\u{5d}-\u{10ffff}] | "\\" esc } + esc { $["\\\/bfnrt] | "u" hex hex hex hex } + hex { $[0-9a-fA-F] } + @precedence { lEscape, LBrace, char, sym } +} + +// https://lezer.codemirror.net/docs/guide/#local-token-groups +// tokens that only exist in the context that they are used +// they only apply while inside the Template scope in this case +@local tokens { + RBrace { "}}" } + Variable { (![}])+ | "{" (![}])+ ("}" | "\\") } +} diff --git a/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.syntax.grammar.d.ts b/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.syntax.grammar.d.ts new file mode 100644 index 0000000000..24c6a31539 --- /dev/null +++ b/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.syntax.grammar.d.ts @@ -0,0 +1,3 @@ +import { LRParser } from "@lezer/lr"; + +export 
declare const parser: LRParser; diff --git a/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.ts b/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.ts new file mode 100644 index 0000000000..b1f9b3015a --- /dev/null +++ b/app/src/components/templateEditor/language/mustacheLike/mustacheLikeTemplating.ts @@ -0,0 +1,86 @@ +import { LanguageSupport, LRLanguage } from "@codemirror/language"; +import { styleTags, tags as t } from "@lezer/highlight"; + +import { extractVariables, format } from "../languageUtils"; + +import { parser } from "./mustacheLikeTemplating.syntax.grammar"; + +/** + * Defines the language for the Mustache-like templating system + * + * @see https://codemirror.net/examples/lang-package/ + * + * @example + * ``` + * {{question}} + * + * { + * "answer": {{answer}} + * } + * ``` + * In this example, the variables are `question` and `answer`. + * Single braces are not considered as variables. + * Double braces will be interpolated with variable values on format. + */ +export const MustacheLikeTemplatingLanguage = LRLanguage.define({ + parser: parser.configure({ + props: [ + // https://lezer.codemirror.net/docs/ref/#highlight.styleTags + styleTags({ + // style the opening brace ({{) of a template, not floating braces + "Template/LBrace": t.quote, + // style the closing brace (}}) of a template, not floating braces + "Template/RBrace": t.quote, + // style variables (stuff inside {{}}) + "Template/Variable": t.variableName, + // style invalid stuff, undefined tokens will be highlighted + "Template/⚠": t.invalid, + }), + ], + }), + languageData: {}, +}); + +/** + * Generates a string representation of the parse tree of the given text + * + * Useful for debugging the parser + */ +export const debugParser = (text: string) => { + const tree = MustacheLikeTemplatingLanguage.parser.parse(text); + return tree.toString(); +}; + +/** + * Formats a Mustache-like template with the given variables. 
+ */ +export const formatMustacheLike = ({ + text, + variables, +}: Omit[0], "parser" | "postFormat">) => + format({ + parser: MustacheLikeTemplatingLanguage.parser, + text, + variables, + postFormat: (text) => { + // replace escaped double braces with double brace + return text.replaceAll("\\{{", "{{"); + }, + }); + +/** + * Extracts the variables from a Mustache-like template + */ +export const extractVariablesFromMustacheLike = (text: string) => { + return extractVariables({ + parser: MustacheLikeTemplatingLanguage.parser, + text, + }); +}; + +/** + * Creates a CodeMirror extension for the FString templating system + */ +export function MustacheLikeTemplating() { + return new LanguageSupport(MustacheLikeTemplatingLanguage); +} diff --git a/app/src/components/templateEditor/templateEditorUtils.ts b/app/src/components/templateEditor/templateEditorUtils.ts new file mode 100644 index 0000000000..1dd1510ce7 --- /dev/null +++ b/app/src/components/templateEditor/templateEditorUtils.ts @@ -0,0 +1,53 @@ +import { assertUnreachable } from "@phoenix/typeUtils"; + +import { extractVariablesFromFString, formatFString } from "./language/fString"; +import { + extractVariablesFromMustacheLike, + formatMustacheLike, +} from "./language/mustacheLike"; +import { TemplateLanguages } from "./constants"; +import { TemplateLanguage } from "./types"; + +/** + * A function that formats a template with the given variables + */ +export type FormatFn = (arg: { + text: string; + variables: Record; +}) => string; + +/** + * A function that extracts the variables from a template + */ +export type ExtractVariablesFn = (template: string) => string[]; + +/** + * Get an object of isomorphic functions for processing templates of the given language + * + * @param templateLanguage - The language of the template to process + * + * @returns An object containing the `format` and `extractVariables` functions. + * These functions share the same signature despite the different underlying + * templating languages. 
+ */ +export const getTemplateLanguageUtils = ( + templateLanguage: TemplateLanguage +): { + format: FormatFn; + extractVariables: ExtractVariablesFn; +} => { + switch (templateLanguage) { + case TemplateLanguages.FString: + return { + format: formatFString, + extractVariables: extractVariablesFromFString, + }; + case TemplateLanguages.Mustache: + return { + format: formatMustacheLike, + extractVariables: extractVariablesFromMustacheLike, + }; + default: + assertUnreachable(templateLanguage); + } +}; diff --git a/app/src/components/templateEditor/types.ts b/app/src/components/templateEditor/types.ts new file mode 100644 index 0000000000..20a7ccfd7c --- /dev/null +++ b/app/src/components/templateEditor/types.ts @@ -0,0 +1,11 @@ +import { TemplateLanguages } from "./constants"; + +export type TemplateLanguage = + (typeof TemplateLanguages)[keyof typeof TemplateLanguages]; + +/** + * Type guard for the TemplateLanguage type + */ +export function isTemplateLanguage(v: string): v is TemplateLanguage { + return Object.values(TemplateLanguages).includes(v as TemplateLanguage); +} diff --git a/app/src/constants/generativeConstants.ts b/app/src/constants/generativeConstants.ts new file mode 100644 index 0000000000..837d11ccd6 --- /dev/null +++ b/app/src/constants/generativeConstants.ts @@ -0,0 +1,15 @@ +/** + * A mapping of ModelProvider to a human-readable string + */ +export const ModelProviders: Record = { + OPENAI: "OpenAI", + AZURE_OPENAI: "Azure OpenAI", + ANTHROPIC: "Anthropic", +}; + +/** + * The default model provider + */ +export const DEFAULT_MODEL_PROVIDER: ModelProvider = "OPENAI"; + +export const DEFAULT_CHAT_ROLE: ChatMessageRole = "user"; diff --git a/app/src/contexts/CredentialsContext.tsx b/app/src/contexts/CredentialsContext.tsx new file mode 100644 index 0000000000..ad26b4ede1 --- /dev/null +++ b/app/src/contexts/CredentialsContext.tsx @@ -0,0 +1,35 @@ +import React, { createContext, PropsWithChildren, useState } from "react"; +import { useZustand } from "use-zustand"; + +import { + createCredentialsStore, + CredentialsProps, + CredentialsState, + CredentialsStore, +} from "@phoenix/store"; + +export const CredentialsContext = createContext(null); + +export function CredentialsProvider({ + children, + ...props +}: PropsWithChildren>) { + const [store] = useState(() => + createCredentialsStore(props) + ); + return ( + + {children} + + ); +} + +export function useCredentialsContext( + selector: (state: CredentialsState) => T, + equalityFn?: (left: T, right: T) => boolean +): T { + const store = React.useContext(CredentialsContext); + if (!store) + throw new Error("Missing CredentialsContext.Provider in the tree"); + return useZustand(store, selector, equalityFn); +} diff --git a/app/src/contexts/PlaygroundContext.tsx b/app/src/contexts/PlaygroundContext.tsx index e6a353f9c2..e3c115e5d9 100644 --- a/app/src/contexts/PlaygroundContext.tsx +++ b/app/src/contexts/PlaygroundContext.tsx @@ -6,7 +6,7 @@ import { PlaygroundProps, PlaygroundState, PlaygroundStore, -} from "@phoenix/store/playgroundStore"; +} from "@phoenix/store"; export const PlaygroundContext = createContext(null); diff --git a/app/src/hooks/useChatMessageStyles.ts b/app/src/hooks/useChatMessageStyles.ts new file mode 100644 index 0000000000..3ccfb43b54 --- /dev/null +++ b/app/src/hooks/useChatMessageStyles.ts @@ -0,0 +1,35 @@ +import { useMemo } from "react"; + +import { ViewStyleProps } from "@arizeai/components"; + +export function useChatMessageStyles( + role: string +): Pick { + return useMemo(() => { + if (role === 
"user" || role === "human") { + return { + backgroundColor: "grey-100", + borderColor: "grey-500", + }; + } else if (role === "assistant" || role === "ai") { + return { + backgroundColor: "blue-100", + borderColor: "blue-700", + }; + } else if (role === "system") { + return { + backgroundColor: "indigo-100", + borderColor: "indigo-700", + }; + } else if (["function", "tool"].includes(role)) { + return { + backgroundColor: "yellow-100", + borderColor: "yellow-700", + }; + } + return { + backgroundColor: "grey-100", + borderColor: "grey-700", + }; + }, [role]); +} diff --git a/app/src/pages/dataset/DatasetCodeDropdown.tsx b/app/src/pages/dataset/DatasetCodeDropdown.tsx index 6148057739..cd338421c6 100644 --- a/app/src/pages/dataset/DatasetCodeDropdown.tsx +++ b/app/src/pages/dataset/DatasetCodeDropdown.tsx @@ -72,7 +72,7 @@ export function DatasetCodeDropdown() { width="100%" > - + @@ -105,7 +105,7 @@ export function DatasetCodeDropdown() {
- +
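Editor's note (not part of the diff): as a quick reference for the template-editor language layer introduced earlier in this diff (`getTemplateLanguageUtils`, `formatMustacheLike`, `formatFString`), here is a hedged usage sketch; the `@phoenix/components/templateEditor/*` import paths are assumed from the new file locations, and the expected values mirror the unit tests above:

```ts
import { TemplateLanguages } from "@phoenix/components/templateEditor/constants";
import { getTemplateLanguageUtils } from "@phoenix/components/templateEditor/templateEditorUtils";

// Mustache-like templates: variables are wrapped in double braces.
const mustache = getTemplateLanguageUtils(TemplateLanguages.Mustache);
mustache.extractVariables("Hi {{name}}, you are {{age}}"); // ["name", "age"]
mustache.format({
  text: '{"name": "{{name}}", "age": {{age}}}',
  variables: { name: "John", age: 30 },
}); // '{"name": "John", "age": 30}'

// F-string templates: single braces are variables, double braces are escapes.
const fString = getTemplateLanguageUtils(TemplateLanguages.FString);
fString.extractVariables("{name} {{age}}"); // ["name"]
fString.format({
  text: "{name} {{age}}",
  variables: { name: "John", age: 30 },
}); // "John {age}"
```

The `TemplateEditor` component selects between these two grammars via its `templateLanguage` prop.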
diff --git a/app/src/pages/playground/MessageRolePicker.tsx b/app/src/pages/playground/MessageRolePicker.tsx new file mode 100644 index 0000000000..6d0c26131b --- /dev/null +++ b/app/src/pages/playground/MessageRolePicker.tsx @@ -0,0 +1,56 @@ +import React from "react"; +import { css } from "@emotion/react"; + +import { Item, Picker } from "@arizeai/components"; + +import { isChatMessageRole } from "./playgroundUtils"; + +const hiddenLabelCSS = css` + .ac-field-label { + display: none; + } +`; + +type MessageRolePickerProps = { + /** + * The currently selected message role + */ + role: ChatMessageRole; + /** + * Whether to display a label for the picker + * This may be set to false in cases where the picker is rendered in a table for instance + * @default true + */ + includeLabel?: boolean; + /** + * Callback for when the message role changes + */ + onChange: (role: ChatMessageRole) => void; +}; + +export function MessageRolePicker({ + role, + includeLabel = true, + onChange, +}: MessageRolePickerProps) { + return ( + { + if (!isChatMessageRole(e)) { + throw new Error(`Invalid chat message role: ${e}`); + } + onChange(e); + }} + > + System + User + AI + + ); +} diff --git a/app/src/pages/playground/ModelConfigButton.tsx b/app/src/pages/playground/ModelConfigButton.tsx new file mode 100644 index 0000000000..99bbc113a8 --- /dev/null +++ b/app/src/pages/playground/ModelConfigButton.tsx @@ -0,0 +1,147 @@ +import React, { + Fragment, + ReactNode, + startTransition, + Suspense, + useCallback, + useState, +} from "react"; +import { graphql, useLazyLoadQuery } from "react-relay"; + +import { + Button, + Dialog, + DialogContainer, + Flex, + Form, + Text, + TextField, + View, +} from "@arizeai/components"; + +import { ModelProviders } from "@phoenix/constants/generativeConstants"; +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; + +import { ModelConfigButtonDialogQuery } from "./__generated__/ModelConfigButtonDialogQuery.graphql"; +import { ModelPicker } from "./ModelPicker"; +import { ModelProviderPicker } from "./ModelProviderPicker"; +import { PlaygroundInstanceProps } from "./types"; + +interface ModelConfigButtonProps extends PlaygroundInstanceProps {} +export function ModelConfigButton(props: ModelConfigButtonProps) { + const [dialog, setDialog] = useState(null); + const instance = usePlaygroundContext((state) => + state.instances.find( + (instance) => instance.id === props.playgroundInstanceId + ) + ); + + if (!instance) { + throw new Error( + `Playground instance ${props.playgroundInstanceId} not found` + ); + } + return ( + + + { + setDialog(null); + }} + > + {dialog} + + + ); +} + +interface ModelConfigDialogContentProps extends ModelConfigButtonProps {} +function ModelConfigDialogContent(props: ModelConfigDialogContentProps) { + const { playgroundInstanceId } = props; + const updateModel = usePlaygroundContext((state) => state.updateModel); + const instance = usePlaygroundContext((state) => + state.instances.find((instance) => instance.id === playgroundInstanceId) + ); + if (!instance) { + throw new Error( + `Playground instance ${props.playgroundInstanceId} not found` + ); + } + const query = useLazyLoadQuery( + graphql` + query ModelConfigButtonDialogQuery($providerKey: GenerativeProviderKey!) 
{ + ...ModelProviderPickerFragment + ...ModelPickerFragment @arguments(providerKey: $providerKey) + } + `, + { providerKey: instance.model.provider } + ); + + const onModelNameChange = useCallback( + (modelName: string) => { + updateModel({ + instanceId: playgroundInstanceId, + model: { + provider: instance.model.provider, + modelName, + }, + }); + }, + [instance.model.provider, playgroundInstanceId, updateModel] + ); + + return ( + +
+ { + updateModel({ + instanceId: playgroundInstanceId, + model: { + provider, + modelName: null, + }, + }); + }} + /> + {instance.model.provider === "AZURE_OPENAI" ? ( + + ) : ( + + )} + +
+ ); +} diff --git a/app/src/pages/playground/ModelPicker.tsx b/app/src/pages/playground/ModelPicker.tsx new file mode 100644 index 0000000000..1d9258e803 --- /dev/null +++ b/app/src/pages/playground/ModelPicker.tsx @@ -0,0 +1,50 @@ +import React from "react"; +import { graphql, useFragment } from "react-relay"; + +import { Item, Picker, PickerProps } from "@arizeai/components"; + +import { ModelPickerFragment$key } from "./__generated__/ModelPickerFragment.graphql"; + +type ModelPickerProps = { + query: ModelPickerFragment$key; + onChange: (model: string) => void; + provider: ModelProvider; + modelName: string | null; +} & Omit< + PickerProps, + "children" | "onSelectionChange" | "defaultSelectedKey" +>; + +export function ModelPicker({ query, onChange, ...props }: ModelPickerProps) { + const data = useFragment( + graphql` + fragment ModelPickerFragment on Query + @argumentDefinitions( + providerKey: { type: "GenerativeProviderKey!", defaultValue: OPENAI } + ) { + modelNames(input: { providerKey: $providerKey }) + } + `, + query + ); + return ( + { + if (typeof key === "string") { + onChange(key); + } + }} + width={"100%"} + {...props} + > + {data.modelNames.map((modelName) => { + return {modelName}; + })} + + ); +} diff --git a/app/src/pages/playground/ModelProviderPicker.tsx b/app/src/pages/playground/ModelProviderPicker.tsx new file mode 100644 index 0000000000..94a30e34e5 --- /dev/null +++ b/app/src/pages/playground/ModelProviderPicker.tsx @@ -0,0 +1,56 @@ +import React from "react"; +import { graphql, useFragment } from "react-relay"; + +import { Item, Picker, PickerProps } from "@arizeai/components"; + +import { isModelProvider } from "@phoenix/utils/generativeUtils"; + +import type { ModelProviderPickerFragment$key } from "./__generated__/ModelProviderPickerFragment.graphql"; + +type ModelProviderPickerProps = { + onChange: (provider: ModelProvider) => void; + query: ModelProviderPickerFragment$key; + provider?: ModelProvider; +} & Omit< + PickerProps, + "children" | "onSelectionChange" | "defaultSelectedKey" +>; + +export function ModelProviderPicker({ + onChange, + query, + ...props +}: ModelProviderPickerProps) { + const data = useFragment( + graphql` + fragment ModelProviderPickerFragment on Query { + modelProviders { + key + name + } + } + `, + query + ); + return ( + { + const provider = key as string; + if (isModelProvider(provider)) { + onChange(provider); + } + }} + width={"100%"} + {...props} + > + {data.modelProviders.map((provider) => { + return {provider.name}; + })} + + ); +} diff --git a/app/src/pages/playground/Playground.tsx b/app/src/pages/playground/Playground.tsx index b326596816..b3ef5b2dca 100644 --- a/app/src/pages/playground/Playground.tsx +++ b/app/src/pages/playground/Playground.tsx @@ -2,54 +2,195 @@ import React from "react"; import { Panel, PanelGroup, PanelResizeHandle } from "react-resizable-panels"; import { css } from "@emotion/react"; -import { Button, Flex, Heading, View } from "@arizeai/components"; +import { + Accordion, + AccordionItem, + Button, + Flex, + Heading, + Icon, + Icons, + View, +} from "@arizeai/components"; import { resizeHandleCSS } from "@phoenix/components/resize"; -import { PlaygroundProvider } from "@phoenix/contexts/PlaygroundContext"; +import { + PlaygroundProvider, + usePlaygroundContext, +} from "@phoenix/contexts/PlaygroundContext"; +import { InitialPlaygroundState } from "@phoenix/store"; +import { NUM_MAX_PLAYGROUND_INSTANCES } from "./constants"; +import { PlaygroundCredentialsDropdown } from 
"./PlaygroundCredentialsDropdown"; import { PlaygroundInput } from "./PlaygroundInput"; -import { PlaygroundOperationTypeRadioGroup } from "./PlaygroundOperationTypeRadioGroup"; +import { PlaygroundInputTypeTypeRadioGroup } from "./PlaygroundInputModeRadioGroup"; import { PlaygroundOutput } from "./PlaygroundOutput"; +import { PlaygroundRunButton } from "./PlaygroundRunButton"; import { PlaygroundTemplate } from "./PlaygroundTemplate"; -import { PlaygroundTools } from "./PlaygroundTools"; +import { TemplateLanguageRadioGroup } from "./TemplateLanguageRadioGroup"; -const panelContentCSS = css` - padding: var(--ac-global-dimension-size-200); - overflow: auto; +export function Playground(props: InitialPlaygroundState) { + return ( + + + + + Playground + + + + + + + + + + ); +} + +function AddPromptButton() { + const addInstance = usePlaygroundContext((state) => state.addInstance); + const numInstances = usePlaygroundContext((state) => state.instances.length); + return ( +
{ + // Stop propagation to prevent the accordion from closing + e.stopPropagation(); + }} + > + +
+ ); +} + +const playgroundPromptPanelContentCSS = css` + display: flex; + flex-direction: column; + height: 100%; + overflow: hidden; + & > .ac-accordion { + display: flex; + flex-direction: column; + height: 100%; + overflow: hidden; + flex: 1 1 auto; + & > .ac-accordion-item { + height: 100%; + overflow: hidden; + flex: 1 1 auto; + .ac-accordion-itemContent { + height: 100%; + overflow: hidden; + flex: 1 1 auto; + & > .ac-view { + height: 100%; + flex: 1 1 auto; + overflow: auto; + box-sizing: border-box; + } + } + } + } +`; + +const playgroundInputOutputPanelContentCSS = css` display: flex; flex-direction: column; - gap: var(--ac-global-dimension-size-200); + height: 100%; + overflow: auto; `; -export function Playground() { +function PlaygroundContent() { + const instances = usePlaygroundContext((state) => state.instances); + const numInstances = instances.length; + const isSingleInstance = numInstances === 1; + return ( - - - - - - Playground - - - - - - - - - - - - - - - - - - + + +
+ + + + + + } + > + + + {instances.map((instance) => ( + + + + ))} + + + + +
+
+ + +
+ + } + > + + + + + + + + {instances.map((instance, i) => ( + + + + ))} + + + + +
+
+
); } diff --git a/app/src/pages/playground/PlaygroundChatTemplate.tsx b/app/src/pages/playground/PlaygroundChatTemplate.tsx new file mode 100644 index 0000000000..bea1b63f1d --- /dev/null +++ b/app/src/pages/playground/PlaygroundChatTemplate.tsx @@ -0,0 +1,300 @@ +import React, { PropsWithChildren } from "react"; +import { + DndContext, + KeyboardSensor, + PointerSensor, + useSensor, + useSensors, +} from "@dnd-kit/core"; +import { + arrayMove, + SortableContext, + sortableKeyboardCoordinates, + useSortable, +} from "@dnd-kit/sortable"; +import { CSS } from "@dnd-kit/utilities"; +import { css } from "@emotion/react"; + +import { Button, Card, Flex, Icon, Icons, View } from "@arizeai/components"; + +import { CopyToClipboardButton } from "@phoenix/components"; +import { DragHandle } from "@phoenix/components/dnd/DragHandle"; +import { TemplateEditor } from "@phoenix/components/templateEditor"; +import { TemplateLanguage } from "@phoenix/components/templateEditor/types"; +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; +import { useChatMessageStyles } from "@phoenix/hooks/useChatMessageStyles"; +import { + ChatMessage, + createTool, + generateMessageId, + PlaygroundChatTemplate as PlaygroundChatTemplateType, +} from "@phoenix/store"; + +import { MessageRolePicker } from "./MessageRolePicker"; +import { PlaygroundTools } from "./PlaygroundTools"; +import { PlaygroundInstanceProps } from "./types"; + +const MESSAGE_Z_INDEX = 1; +/** + * The z-index of the dragging message. + * Must be higher than the z-index of the other messages. Otherwise when dragging + * from top to bottom, the dragging message will be covered by the message below. + */ +const DRAGGING_MESSAGE_Z_INDEX = MESSAGE_Z_INDEX + 1; + +interface PlaygroundChatTemplateProps extends PlaygroundInstanceProps {} + +export function PlaygroundChatTemplate(props: PlaygroundChatTemplateProps) { + const id = props.playgroundInstanceId; + + const templateLanguage = usePlaygroundContext( + (state) => state.templateLanguage + ); + const instances = usePlaygroundContext((state) => state.instances); + const updateInstance = usePlaygroundContext((state) => state.updateInstance); + const playgroundInstance = instances.find((instance) => instance.id === id); + if (!playgroundInstance) { + throw new Error(`Playground instance ${id} not found`); + } + const hasTools = playgroundInstance.tools.length > 0; + const { template } = playgroundInstance; + if (template.__type !== "chat") { + throw new Error(`Invalid template type ${template.__type}`); + } + + const sensors = useSensors( + useSensor(PointerSensor), + useSensor(KeyboardSensor, { + coordinateGetter: sortableKeyboardCoordinates, + }) + ); + + return ( + { + if (!over || active.id === over.id) { + return; + } + const activeIndex = template.messages.findIndex( + (message) => message.id === active.id + ); + const overIndex = template.messages.findIndex( + (message) => message.id === over.id + ); + const newMessages = arrayMove( + template.messages, + activeIndex, + overIndex + ); + updateInstance({ + instanceId: id, + patch: { + template: { + __type: "chat", + messages: newMessages, + }, + }, + }); + }} + > + +
    + {template.messages.map((message, index) => { + return ( + + ); + })} +
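            {/* Each mapped message above renders as a SortableMessageItem (defined
                below in this file); dnd-kit's useSortable is keyed on message.id,
                which matches the findIndex lookups in the onDragEnd handler. */}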
+
+ + + + + + + {hasTools ? : null} +
+ ); +} + +function SortableMessageItem({ + playgroundInstanceId, + templateLanguage, + template, + message, +}: PropsWithChildren< + PlaygroundInstanceProps & { + template: PlaygroundChatTemplateType; + message: ChatMessage; + templateLanguage: TemplateLanguage; + index: number; + } +>) { + const updateInstance = usePlaygroundContext((state) => state.updateInstance); + const { + attributes, + listeners, + setNodeRef, + transform, + transition, + setActivatorNodeRef, + isDragging, + } = useSortable({ + id: message.id, + }); + + const messageCardStyles = useChatMessageStyles(message.role); + const dragAndDropLiStyles = { + transform: CSS.Translate.toString(transform), + transition, + zIndex: isDragging ? DRAGGING_MESSAGE_Z_INDEX : MESSAGE_Z_INDEX, + }; + + return ( +
  • + { + updateInstance({ + instanceId: playgroundInstanceId, + patch: { + template: { + __type: "chat", + messages: template.messages.map((msg) => + msg.id === message.id ? { ...msg, role } : msg + ), + }, + }, + }); + }} + /> + } + extra={ + + {message.content != null && ( + + )} +
  • + ); +} diff --git a/app/src/pages/playground/PlaygroundCredentialsDropdown.tsx b/app/src/pages/playground/PlaygroundCredentialsDropdown.tsx new file mode 100644 index 0000000000..c4273fc232 --- /dev/null +++ b/app/src/pages/playground/PlaygroundCredentialsDropdown.tsx @@ -0,0 +1,76 @@ +import React from "react"; +import { css } from "@emotion/react"; + +import { + DropdownButton, + DropdownMenu, + DropdownTrigger, + Flex, + Form, + Heading, + Text, + TextField, + View, +} from "@arizeai/components"; + +import { useCredentialsContext } from "@phoenix/contexts/CredentialsContext"; +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; + +export const ProviderToCredentialNameMap: Record = { + OPENAI: "OPENAI_API_KEY", + ANTHROPIC: "ANTHROPIC_API_KEY", + AZURE_OPENAI: "AZURE_OPENAI_API_KEY", +}; + +export function PlaygroundCredentialsDropdown() { + const currentProviders = usePlaygroundContext((state) => + Array.from( + new Set(state.instances.map((instance) => instance.model.provider)) + ) + ); + const setCredential = useCredentialsContext((state) => state.setCredential); + const credentials = useCredentialsContext((state) => state); + return ( +
    + + API Keys + + + + + API Keys + + + API keys are stored in your browser and used to communicate with + their respective APIs. +
    + {currentProviders.map((provider) => { + const credentialName = ProviderToCredentialNameMap[provider]; + return ( + { + setCredential({ provider, value }); + }} + value={credentials[provider]} + /> + ); + })} + +
    +
    +
    +
    +
    + ); +} diff --git a/app/src/pages/playground/PlaygroundInput.tsx b/app/src/pages/playground/PlaygroundInput.tsx index aa72e5ba8f..14ece296ab 100644 --- a/app/src/pages/playground/PlaygroundInput.tsx +++ b/app/src/pages/playground/PlaygroundInput.tsx @@ -1,11 +1,69 @@ import React from "react"; -import { Card } from "@arizeai/components"; +import { Flex, Text, View } from "@arizeai/components"; + +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; +import { + selectDerivedInputVariables, + selectInputVariableKeys, +} from "@phoenix/store"; +import { assertUnreachable } from "@phoenix/typeUtils"; + +import { VariableEditor } from "./VariableEditor"; export function PlaygroundInput() { + const variables = usePlaygroundContext(selectDerivedInputVariables); + const variableKeys = usePlaygroundContext(selectInputVariableKeys); + const setVariableValue = usePlaygroundContext( + (state) => state.setVariableValue + ); + const templateLanguage = usePlaygroundContext( + (state) => state.templateLanguage + ); + + if (variableKeys.length === 0) { + let templateSyntax = ""; + switch (templateLanguage) { + case "f-string": { + templateSyntax = "{input name}"; + break; + } + case "mustache": { + templateSyntax = "{{input name}}"; + break; + } + default: + assertUnreachable(templateLanguage); + } + return ( + + + + Add variable inputs to your prompt using{" "} + {templateSyntax} within your prompt + template. + + + + ); + } + return ( - - Input goes here - + + {variableKeys.map((variableKey, i) => { + return ( + setVariableValue(variableKey, value)} + /> + ); + })} + ); } diff --git a/app/src/pages/playground/PlaygroundInputModeRadioGroup.tsx b/app/src/pages/playground/PlaygroundInputModeRadioGroup.tsx new file mode 100644 index 0000000000..313b9089c5 --- /dev/null +++ b/app/src/pages/playground/PlaygroundInputModeRadioGroup.tsx @@ -0,0 +1,32 @@ +import React from "react"; + +import { Radio, RadioGroup } from "@arizeai/components"; + +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; + +/** + * A store connected radio group that toggles between manual and dataset input types. 
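 * Selecting an option writes "manual" or "dataset" to the store via setInputMode.
 * @example
 * // illustrative usage; the component takes no props
 * <PlaygroundInputTypeTypeRadioGroup />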
+ */ +export function PlaygroundInputTypeTypeRadioGroup() { + const inputMode = usePlaygroundContext((state) => state.inputMode); + const setInputMode = usePlaygroundContext((state) => state.setInputMode); + return ( + { + if (value === "manual" || value === "dataset") { + setInputMode(value); + } + }} + > + + Manual + + + Dataset + + + ); +} diff --git a/app/src/pages/playground/PlaygroundOutput.tsx b/app/src/pages/playground/PlaygroundOutput.tsx index 6c521a707d..f0a7f65b4d 100644 --- a/app/src/pages/playground/PlaygroundOutput.tsx +++ b/app/src/pages/playground/PlaygroundOutput.tsx @@ -1,11 +1,305 @@ -import React from "react"; +import React, { useMemo, useState } from "react"; +import { useSubscription } from "react-relay"; +import { graphql, GraphQLSubscriptionConfig } from "relay-runtime"; +import { css } from "@emotion/react"; -import { Card } from "@arizeai/components"; +import { Card, Flex, Icon, Icons } from "@arizeai/components"; + +import { useCredentialsContext } from "@phoenix/contexts/CredentialsContext"; +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; +import { useChatMessageStyles } from "@phoenix/hooks/useChatMessageStyles"; +import type { ToolCall } from "@phoenix/store"; +import { ChatMessage, generateMessageId } from "@phoenix/store"; +import { assertUnreachable } from "@phoenix/typeUtils"; + +import { + ChatCompletionMessageInput, + ChatCompletionMessageRole, + PlaygroundOutputSubscription, + PlaygroundOutputSubscription$data, + PlaygroundOutputSubscription$variables, +} from "./__generated__/PlaygroundOutputSubscription.graphql"; +import { isChatMessages } from "./playgroundUtils"; +import { TitleWithAlphabeticIndex } from "./TitleWithAlphabeticIndex"; +import { PlaygroundInstanceProps } from "./types"; + +interface PlaygroundOutputProps extends PlaygroundInstanceProps {} + +function PlaygroundOutputMessage({ message }: { message: ChatMessage }) { + const { role, content, toolCalls } = message; + const styles = useChatMessageStyles(role); + + return ( + + {content != null && ( + + {content} + + )} + {toolCalls && toolCalls.length > 0 + ? toolCalls.map((toolCall) => { + return ( +
    +                {toolCall.function.name}(
    +                {JSON.stringify(
    +                  JSON.parse(toolCall.function.arguments),
    +                  null,
    +                  2
    +                )}
    +                )
    +              
    + ); + }) + : null} +
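          {/* Tool calls are rendered above as name(arguments); the JSON argument
              string is parsed and re-stringified at a 2-space indent for readability. */}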
    + ); +} + +export function PlaygroundOutput(props: PlaygroundOutputProps) { + const instanceId = props.playgroundInstanceId; + const instance = usePlaygroundContext((state) => + state.instances.find((instance) => instance.id === instanceId) + ); + const index = usePlaygroundContext((state) => + state.instances.findIndex((instance) => instance.id === instanceId) + ); + if (!instance) { + throw new Error(`Playground instance ${instanceId} not found`); + } + + const runId = instance.activeRunId; + const hasRunId = runId !== null; + + const OutputEl = useMemo(() => { + if (hasRunId) { + return ( + + ); + } + if (isChatMessages(instance.output)) { + const messages = instance.output; + + return messages.map((message, index) => { + return ; + }); + } + if (typeof instance.output === "string") { + return ( + + ); + } + return "click run to see output"; + }, [hasRunId, instance.output, instanceId, runId]); -export function PlaygroundOutput() { return ( - - Output goes here + } + collapsible + variant="compact" + > + {OutputEl} ); } + +function useChatCompletionSubscription({ + params, + runId, + onNext, + onCompleted, +}: { + params: PlaygroundOutputSubscription$variables; + runId: number; + onNext: (response: PlaygroundOutputSubscription$data) => void; + onCompleted: () => void; +}) { + const config = useMemo< + GraphQLSubscriptionConfig + >( + () => ({ + subscription: graphql` + subscription PlaygroundOutputSubscription( + $messages: [ChatCompletionMessageInput!]! + $model: GenerativeModelInput! + $invocationParameters: InvocationParameters! + $tools: [JSON!] + $apiKey: String + ) { + chatCompletion( + input: { + messages: $messages + model: $model + invocationParameters: $invocationParameters + tools: $tools + apiKey: $apiKey + } + ) { + __typename + ... on TextChunk { + content + } + ... 
on ToolCallChunk { + id + function { + name + arguments + } + } + } + } + `, + variables: params, + onNext: (response) => { + if (response) { + onNext(response); + } + }, + onCompleted: () => { + onCompleted(); + }, + }), + // eslint-disable-next-line react-compiler/react-compiler + // eslint-disable-next-line react-hooks/exhaustive-deps + [runId] + ); + return useSubscription(config); +} + +/** + * A utility function to convert playground messages content to GQL chat completion message input + */ +function toGqlChatCompletionMessage( + message: ChatMessage +): ChatCompletionMessageInput { + return { + content: message.content, + role: toGqlChatCompletionRole(message.role), + }; +} + +function toGqlChatCompletionRole( + role: ChatMessageRole +): ChatCompletionMessageRole { + switch (role) { + case "system": + return "SYSTEM"; + case "user": + return "USER"; + case "tool": + return "TOOL"; + case "ai": + return "AI"; + default: + assertUnreachable(role); + } +} + +function PlaygroundOutputText(props: PlaygroundInstanceProps) { + const instances = usePlaygroundContext((state) => state.instances); + const credentials = useCredentialsContext((state) => state); + const instance = instances.find( + (instance) => instance.id === props.playgroundInstanceId + ); + const markPlaygroundInstanceComplete = usePlaygroundContext( + (state) => state.markPlaygroundInstanceComplete + ); + if (!instance) { + throw new Error("No instance found"); + } + if (typeof instance.activeRunId !== "number") { + throw new Error("No message found"); + } + + if (instance.template.__type !== "chat") { + throw new Error("We only support chat templates for now"); + } + + const [output, setOutput] = useState(undefined); + const [toolCalls, setToolCalls] = useState([]); + + useChatCompletionSubscription({ + params: { + messages: instance.template.messages.map(toGqlChatCompletionMessage), + model: { + providerKey: instance.model.provider, + name: instance.model.modelName || "", + }, + invocationParameters: { + toolChoice: instance.toolChoice, + }, + tools: instance.tools.map((tool) => tool.definition), + apiKey: credentials[instance.model.provider], + }, + runId: instance.activeRunId, + onNext: (response) => { + const chatCompletion = response.chatCompletion; + if (chatCompletion.__typename === "TextChunk") { + setOutput((acc) => (acc || "") + chatCompletion.content); + } else if (chatCompletion.__typename === "ToolCallChunk") { + setToolCalls((toolCalls) => { + let toolCallExists = false; + const updated = toolCalls.map((toolCall) => { + if (toolCall.id === chatCompletion.id) { + toolCallExists = true; + return { + ...toolCall, + function: { + ...toolCall.function, + arguments: + toolCall.function.arguments + + chatCompletion.function.arguments, + }, + }; + } else { + return toolCall; + } + }); + if (!toolCallExists) { + updated.push({ + id: chatCompletion.id, + function: { + name: chatCompletion.function.name, + arguments: chatCompletion.function.arguments, + }, + }); + } + return updated; + }); + } + }, + onCompleted: () => { + markPlaygroundInstanceComplete(props.playgroundInstanceId); + }, + }); + + if (!output && (toolCalls.length === 0 || instance.isRunning)) { + return ( + + } /> + Running... 
+ + ); + } + return ( + + ); +} diff --git a/app/src/pages/playground/PlaygroundRunButton.tsx b/app/src/pages/playground/PlaygroundRunButton.tsx new file mode 100644 index 0000000000..b4d1e2eb33 --- /dev/null +++ b/app/src/pages/playground/PlaygroundRunButton.tsx @@ -0,0 +1,28 @@ +import React from "react"; + +import { Button, Icon, Icons } from "@arizeai/components"; + +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; + +export function PlaygroundRunButton() { + const runPlaygroundInstances = usePlaygroundContext( + (state) => state.runPlaygroundInstances + ); + const isRunning = usePlaygroundContext((state) => + state.instances.some((instance) => instance.isRunning) + ); + return ( + + ); +} diff --git a/app/src/pages/playground/PlaygroundTemplate.tsx b/app/src/pages/playground/PlaygroundTemplate.tsx index 579cd47f7e..b3e75e5ac1 100644 --- a/app/src/pages/playground/PlaygroundTemplate.tsx +++ b/app/src/pages/playground/PlaygroundTemplate.tsx @@ -1,18 +1,80 @@ import React from "react"; -import { Card } from "@arizeai/components"; +import { + Button, + Card, + Content, + Flex, + Icon, + Icons, + Tooltip, + TooltipTrigger, + TriggerWrap, +} from "@arizeai/components"; +import { AlphabeticIndexIcon } from "@phoenix/components/AlphabeticIndexIcon"; import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; -export function PlaygroundTemplate() { - const operationType = usePlaygroundContext((state) => state.operationType); +import { ModelConfigButton } from "./ModelConfigButton"; +import { PlaygroundChatTemplate } from "./PlaygroundChatTemplate"; +import { PlaygroundInstanceProps } from "./types"; + +interface PlaygroundTemplateProps extends PlaygroundInstanceProps {} + +export function PlaygroundTemplate(props: PlaygroundTemplateProps) { + const instanceId = props.playgroundInstanceId; + const instances = usePlaygroundContext((state) => state.instances); + const instance = instances.find((instance) => instance.id === instanceId); + const index = instances.findIndex((instance) => instance.id === instanceId); + if (!instance) { + throw new Error(`Playground instance ${instanceId} not found`); + } + const { template } = instance; + return ( - - {operationType === "chat" ? ( -
    Chat Template goes here
    + + + Prompt + + } + collapsible + variant="compact" + bodyStyle={{ padding: 0 }} + extra={ + + + {instances.length > 1 ? : null} + + } + > + {template.__type === "chat" ? ( + ) : ( -
    Completion Template goes here
    + "Completion Template" )}
    ); } + +function DeleteButton(props: PlaygroundInstanceProps) { + const deleteInstance = usePlaygroundContext((state) => state.deleteInstance); + return ( + + + + } + >{`Replay and iterate on your LLM call from your ${span.project.name} project`} + )} + {showParsingErrorsBanner && hasParsingErrors && ( + { + setShowParsingErrorsBanner(false); + }} + title="The following errors occurred when parsing span attributes:" + > +
      + {parsingErrors.map((error) => ( +
    • {error}
    • + ))} +
    +
    + )} + + ); +} diff --git a/app/src/pages/playground/TemplateLanguageRadioGroup.tsx b/app/src/pages/playground/TemplateLanguageRadioGroup.tsx new file mode 100644 index 0000000000..a967443312 --- /dev/null +++ b/app/src/pages/playground/TemplateLanguageRadioGroup.tsx @@ -0,0 +1,34 @@ +import React from "react"; + +import { Radio, RadioGroup } from "@arizeai/components"; + +import { TemplateLanguages } from "@phoenix/components/templateEditor/constants"; +import { isTemplateLanguage } from "@phoenix/components/templateEditor/types"; +import { usePlaygroundContext } from "@phoenix/contexts/PlaygroundContext"; + +export function TemplateLanguageRadioGroup() { + const language = usePlaygroundContext((state) => state.templateLanguage); + const setLanguage = usePlaygroundContext( + (state) => state.setTemplateLanguage + ); + return ( + { + if (isTemplateLanguage(v)) { + setLanguage(v); + } + }} + > + + Mustache + + + F-String + + + ); +} diff --git a/app/src/pages/playground/TitleWithAlphabeticIndex.tsx b/app/src/pages/playground/TitleWithAlphabeticIndex.tsx new file mode 100644 index 0000000000..af6bba26dc --- /dev/null +++ b/app/src/pages/playground/TitleWithAlphabeticIndex.tsx @@ -0,0 +1,23 @@ +import React from "react"; + +import { Flex } from "@arizeai/components"; + +import { AlphabeticIndexIcon } from "@phoenix/components/AlphabeticIndexIcon"; + +/** + * Display the alphabetic index and title in a single line + */ +export function TitleWithAlphabeticIndex({ + index, + title, +}: { + index: number; + title: string; +}) { + return ( + + + {title} + + ); +} diff --git a/app/src/pages/playground/VariableEditor.tsx b/app/src/pages/playground/VariableEditor.tsx new file mode 100644 index 0000000000..17b9b93b9f --- /dev/null +++ b/app/src/pages/playground/VariableEditor.tsx @@ -0,0 +1,51 @@ +import React from "react"; +import { githubLight } from "@uiw/codemirror-theme-github"; +import { nord } from "@uiw/codemirror-theme-nord"; +import ReactCodeMirror, { + BasicSetupOptions, + EditorView, +} from "@uiw/react-codemirror"; + +import { Field } from "@arizeai/components"; + +import { CodeWrap } from "@phoenix/components/code"; +import { useTheme } from "@phoenix/contexts"; + +type VariableEditorProps = { + label?: string; + value?: string; + onChange?: (value: string) => void; +}; + +const basicSetupOptions: BasicSetupOptions = { + lineNumbers: false, + highlightActiveLine: false, + foldGutter: false, + highlightActiveLineGutter: false, + bracketMatching: false, + syntaxHighlighting: false, +}; + +const extensions = [EditorView.lineWrapping]; + +export const VariableEditor = ({ + label, + value, + onChange, +}: VariableEditorProps) => { + const { theme } = useTheme(); + const codeMirrorTheme = theme === "light" ? 
githubLight : nord; + return ( + + + + + + ); +}; diff --git a/app/src/pages/playground/__generated__/ModelConfigButtonDialogQuery.graphql.ts b/app/src/pages/playground/__generated__/ModelConfigButtonDialogQuery.graphql.ts new file mode 100644 index 0000000000..187da00815 --- /dev/null +++ b/app/src/pages/playground/__generated__/ModelConfigButtonDialogQuery.graphql.ts @@ -0,0 +1,120 @@ +/** + * @generated SignedSource<<176456afea57f0245ab80564600db337>> + * @lightSyntaxTransform + * @nogrep + */ + +/* tslint:disable */ +/* eslint-disable */ +// @ts-nocheck + +import { ConcreteRequest, Query } from 'relay-runtime'; +import { FragmentRefs } from "relay-runtime"; +export type GenerativeProviderKey = "ANTHROPIC" | "AZURE_OPENAI" | "OPENAI"; +export type ModelConfigButtonDialogQuery$variables = { + providerKey: GenerativeProviderKey; +}; +export type ModelConfigButtonDialogQuery$data = { + readonly " $fragmentSpreads": FragmentRefs<"ModelPickerFragment" | "ModelProviderPickerFragment">; +}; +export type ModelConfigButtonDialogQuery = { + response: ModelConfigButtonDialogQuery$data; + variables: ModelConfigButtonDialogQuery$variables; +}; + +const node: ConcreteRequest = (function(){ +var v0 = [ + { + "defaultValue": null, + "kind": "LocalArgument", + "name": "providerKey" + } +], +v1 = [ + { + "kind": "Variable", + "name": "providerKey", + "variableName": "providerKey" + } +]; +return { + "fragment": { + "argumentDefinitions": (v0/*: any*/), + "kind": "Fragment", + "metadata": null, + "name": "ModelConfigButtonDialogQuery", + "selections": [ + { + "args": null, + "kind": "FragmentSpread", + "name": "ModelProviderPickerFragment" + }, + { + "args": (v1/*: any*/), + "kind": "FragmentSpread", + "name": "ModelPickerFragment" + } + ], + "type": "Query", + "abstractKey": null + }, + "kind": "Request", + "operation": { + "argumentDefinitions": (v0/*: any*/), + "kind": "Operation", + "name": "ModelConfigButtonDialogQuery", + "selections": [ + { + "alias": null, + "args": null, + "concreteType": "GenerativeProvider", + "kind": "LinkedField", + "name": "modelProviders", + "plural": true, + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "key", + "storageKey": null + }, + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "name", + "storageKey": null + } + ], + "storageKey": null + }, + { + "alias": null, + "args": [ + { + "fields": (v1/*: any*/), + "kind": "ObjectValue", + "name": "input" + } + ], + "kind": "ScalarField", + "name": "modelNames", + "storageKey": null + } + ] + }, + "params": { + "cacheID": "34f8d81e91b335ca310c9be756719426", + "id": null, + "metadata": {}, + "name": "ModelConfigButtonDialogQuery", + "operationKind": "query", + "text": "query ModelConfigButtonDialogQuery(\n $providerKey: GenerativeProviderKey!\n) {\n ...ModelProviderPickerFragment\n ...ModelPickerFragment_3rERSq\n}\n\nfragment ModelPickerFragment_3rERSq on Query {\n modelNames(input: {providerKey: $providerKey})\n}\n\nfragment ModelProviderPickerFragment on Query {\n modelProviders {\n key\n name\n }\n}\n" + } +}; +})(); + +(node as any).hash = "c9b38e766093b2378047d22b01ef0fbf"; + +export default node; diff --git a/app/src/pages/playground/__generated__/ModelPickerFragment.graphql.ts b/app/src/pages/playground/__generated__/ModelPickerFragment.graphql.ts new file mode 100644 index 0000000000..9dda921ba2 --- /dev/null +++ b/app/src/pages/playground/__generated__/ModelPickerFragment.graphql.ts @@ -0,0 +1,60 @@ +/** + * @generated 
SignedSource<<6931dc528aea2b22801320e6d297dd58>> + * @lightSyntaxTransform + * @nogrep + */ + +/* tslint:disable */ +/* eslint-disable */ +// @ts-nocheck + +import { Fragment, ReaderFragment } from 'relay-runtime'; +import { FragmentRefs } from "relay-runtime"; +export type ModelPickerFragment$data = { + readonly modelNames: ReadonlyArray; + readonly " $fragmentType": "ModelPickerFragment"; +}; +export type ModelPickerFragment$key = { + readonly " $data"?: ModelPickerFragment$data; + readonly " $fragmentSpreads": FragmentRefs<"ModelPickerFragment">; +}; + +const node: ReaderFragment = { + "argumentDefinitions": [ + { + "defaultValue": "OPENAI", + "kind": "LocalArgument", + "name": "providerKey" + } + ], + "kind": "Fragment", + "metadata": null, + "name": "ModelPickerFragment", + "selections": [ + { + "alias": null, + "args": [ + { + "fields": [ + { + "kind": "Variable", + "name": "providerKey", + "variableName": "providerKey" + } + ], + "kind": "ObjectValue", + "name": "input" + } + ], + "kind": "ScalarField", + "name": "modelNames", + "storageKey": null + } + ], + "type": "Query", + "abstractKey": null +}; + +(node as any).hash = "bb2557396c978bb5f57c7a4f67d756b1"; + +export default node; diff --git a/app/src/pages/playground/__generated__/ModelProviderPickerFragment.graphql.ts b/app/src/pages/playground/__generated__/ModelProviderPickerFragment.graphql.ts new file mode 100644 index 0000000000..fd487f655f --- /dev/null +++ b/app/src/pages/playground/__generated__/ModelProviderPickerFragment.graphql.ts @@ -0,0 +1,64 @@ +/** + * @generated SignedSource<<8d3d09b89a6d54cc8b22d75946b7094b>> + * @lightSyntaxTransform + * @nogrep + */ + +/* tslint:disable */ +/* eslint-disable */ +// @ts-nocheck + +import { Fragment, ReaderFragment } from 'relay-runtime'; +export type GenerativeProviderKey = "ANTHROPIC" | "AZURE_OPENAI" | "OPENAI"; +import { FragmentRefs } from "relay-runtime"; +export type ModelProviderPickerFragment$data = { + readonly modelProviders: ReadonlyArray<{ + readonly key: GenerativeProviderKey; + readonly name: string; + }>; + readonly " $fragmentType": "ModelProviderPickerFragment"; +}; +export type ModelProviderPickerFragment$key = { + readonly " $data"?: ModelProviderPickerFragment$data; + readonly " $fragmentSpreads": FragmentRefs<"ModelProviderPickerFragment">; +}; + +const node: ReaderFragment = { + "argumentDefinitions": [], + "kind": "Fragment", + "metadata": null, + "name": "ModelProviderPickerFragment", + "selections": [ + { + "alias": null, + "args": null, + "concreteType": "GenerativeProvider", + "kind": "LinkedField", + "name": "modelProviders", + "plural": true, + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "key", + "storageKey": null + }, + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "name", + "storageKey": null + } + ], + "storageKey": null + } + ], + "type": "Query", + "abstractKey": null +}; + +(node as any).hash = "c83e86a2772127916f7387dca27b74ce"; + +export default node; diff --git a/app/src/pages/playground/__generated__/PlaygroundOutputSubscription.graphql.ts b/app/src/pages/playground/__generated__/PlaygroundOutputSubscription.graphql.ts new file mode 100644 index 0000000000..d560c6a348 --- /dev/null +++ b/app/src/pages/playground/__generated__/PlaygroundOutputSubscription.graphql.ts @@ -0,0 +1,233 @@ +/** + * @generated SignedSource<> + * @lightSyntaxTransform + * @nogrep + */ + +/* tslint:disable */ +/* eslint-disable */ +// @ts-nocheck + +import { ConcreteRequest, GraphQLSubscription } 
from 'relay-runtime'; +export type ChatCompletionMessageRole = "AI" | "SYSTEM" | "TOOL" | "USER"; +export type GenerativeProviderKey = "ANTHROPIC" | "AZURE_OPENAI" | "OPENAI"; +export type ChatCompletionMessageInput = { + content: any; + role: ChatCompletionMessageRole; +}; +export type GenerativeModelInput = { + name: string; + providerKey: GenerativeProviderKey; +}; +export type InvocationParameters = { + maxCompletionTokens?: number | null; + maxTokens?: number | null; + seed?: number | null; + stop?: ReadonlyArray | null; + temperature?: number | null; + toolChoice?: any | null; + topP?: number | null; +}; +export type PlaygroundOutputSubscription$variables = { + apiKey?: string | null; + invocationParameters: InvocationParameters; + messages: ReadonlyArray; + model: GenerativeModelInput; + tools?: ReadonlyArray | null; +}; +export type PlaygroundOutputSubscription$data = { + readonly chatCompletion: { + readonly __typename: "TextChunk"; + readonly content: string; + } | { + readonly __typename: "ToolCallChunk"; + readonly function: { + readonly arguments: string; + readonly name: string; + }; + readonly id: string; + } | { + // This will never be '%other', but we need some + // value in case none of the concrete values match. + readonly __typename: "%other"; + }; +}; +export type PlaygroundOutputSubscription = { + response: PlaygroundOutputSubscription$data; + variables: PlaygroundOutputSubscription$variables; +}; + +const node: ConcreteRequest = (function(){ +var v0 = { + "defaultValue": null, + "kind": "LocalArgument", + "name": "apiKey" +}, +v1 = { + "defaultValue": null, + "kind": "LocalArgument", + "name": "invocationParameters" +}, +v2 = { + "defaultValue": null, + "kind": "LocalArgument", + "name": "messages" +}, +v3 = { + "defaultValue": null, + "kind": "LocalArgument", + "name": "model" +}, +v4 = { + "defaultValue": null, + "kind": "LocalArgument", + "name": "tools" +}, +v5 = [ + { + "alias": null, + "args": [ + { + "fields": [ + { + "kind": "Variable", + "name": "apiKey", + "variableName": "apiKey" + }, + { + "kind": "Variable", + "name": "invocationParameters", + "variableName": "invocationParameters" + }, + { + "kind": "Variable", + "name": "messages", + "variableName": "messages" + }, + { + "kind": "Variable", + "name": "model", + "variableName": "model" + }, + { + "kind": "Variable", + "name": "tools", + "variableName": "tools" + } + ], + "kind": "ObjectValue", + "name": "input" + } + ], + "concreteType": null, + "kind": "LinkedField", + "name": "chatCompletion", + "plural": false, + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "__typename", + "storageKey": null + }, + { + "kind": "InlineFragment", + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "content", + "storageKey": null + } + ], + "type": "TextChunk", + "abstractKey": null + }, + { + "kind": "InlineFragment", + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "id", + "storageKey": null + }, + { + "alias": null, + "args": null, + "concreteType": "FunctionCallChunk", + "kind": "LinkedField", + "name": "function", + "plural": false, + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "name", + "storageKey": null + }, + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "arguments", + "storageKey": null + } + ], + "storageKey": null + } + ], + "type": "ToolCallChunk", + "abstractKey": null + } + ], + "storageKey": null + } +]; +return { + 
"fragment": { + "argumentDefinitions": [ + (v0/*: any*/), + (v1/*: any*/), + (v2/*: any*/), + (v3/*: any*/), + (v4/*: any*/) + ], + "kind": "Fragment", + "metadata": null, + "name": "PlaygroundOutputSubscription", + "selections": (v5/*: any*/), + "type": "Subscription", + "abstractKey": null + }, + "kind": "Request", + "operation": { + "argumentDefinitions": [ + (v2/*: any*/), + (v3/*: any*/), + (v1/*: any*/), + (v4/*: any*/), + (v0/*: any*/) + ], + "kind": "Operation", + "name": "PlaygroundOutputSubscription", + "selections": (v5/*: any*/) + }, + "params": { + "cacheID": "924d84f911c5156af0abb2a371d098f2", + "id": null, + "metadata": {}, + "name": "PlaygroundOutputSubscription", + "operationKind": "subscription", + "text": "subscription PlaygroundOutputSubscription(\n $messages: [ChatCompletionMessageInput!]!\n $model: GenerativeModelInput!\n $invocationParameters: InvocationParameters!\n $tools: [JSON!]\n $apiKey: String\n) {\n chatCompletion(input: {messages: $messages, model: $model, invocationParameters: $invocationParameters, tools: $tools, apiKey: $apiKey}) {\n __typename\n ... on TextChunk {\n content\n }\n ... on ToolCallChunk {\n id\n function {\n name\n arguments\n }\n }\n }\n}\n" + } +}; +})(); + +(node as any).hash = "30b9973d6ea69054a907549af97c0e5f"; + +export default node; diff --git a/app/src/pages/playground/__generated__/spanPlaygroundPageLoaderQuery.graphql.ts b/app/src/pages/playground/__generated__/spanPlaygroundPageLoaderQuery.graphql.ts new file mode 100644 index 0000000000..3fbf6ebc00 --- /dev/null +++ b/app/src/pages/playground/__generated__/spanPlaygroundPageLoaderQuery.graphql.ts @@ -0,0 +1,201 @@ +/** + * @generated SignedSource<<6af837046e4f840154a1a6141403e108>> + * @lightSyntaxTransform + * @nogrep + */ + +/* tslint:disable */ +/* eslint-disable */ +// @ts-nocheck + +import { ConcreteRequest, Query } from 'relay-runtime'; +export type spanPlaygroundPageLoaderQuery$variables = { + spanId: string; +}; +export type spanPlaygroundPageLoaderQuery$data = { + readonly span: { + readonly __typename: "Span"; + readonly attributes: string; + readonly context: { + readonly spanId: string; + readonly traceId: string; + }; + readonly id: string; + readonly project: { + readonly id: string; + readonly name: string; + }; + } | { + // This will never be '%other', but we need some + // value in case none of the concrete values match. 
+ readonly __typename: "%other"; + }; +}; +export type spanPlaygroundPageLoaderQuery = { + response: spanPlaygroundPageLoaderQuery$data; + variables: spanPlaygroundPageLoaderQuery$variables; +}; + +const node: ConcreteRequest = (function(){ +var v0 = [ + { + "defaultValue": null, + "kind": "LocalArgument", + "name": "spanId" + } +], +v1 = [ + { + "kind": "Variable", + "name": "id", + "variableName": "spanId" + } +], +v2 = { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "__typename", + "storageKey": null +}, +v3 = { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "id", + "storageKey": null +}, +v4 = { + "alias": null, + "args": null, + "concreteType": "Project", + "kind": "LinkedField", + "name": "project", + "plural": false, + "selections": [ + (v3/*: any*/), + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "name", + "storageKey": null + } + ], + "storageKey": null +}, +v5 = { + "alias": null, + "args": null, + "concreteType": "SpanContext", + "kind": "LinkedField", + "name": "context", + "plural": false, + "selections": [ + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "spanId", + "storageKey": null + }, + { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "traceId", + "storageKey": null + } + ], + "storageKey": null +}, +v6 = { + "alias": null, + "args": null, + "kind": "ScalarField", + "name": "attributes", + "storageKey": null +}; +return { + "fragment": { + "argumentDefinitions": (v0/*: any*/), + "kind": "Fragment", + "metadata": null, + "name": "spanPlaygroundPageLoaderQuery", + "selections": [ + { + "alias": "span", + "args": (v1/*: any*/), + "concreteType": null, + "kind": "LinkedField", + "name": "node", + "plural": false, + "selections": [ + (v2/*: any*/), + { + "kind": "InlineFragment", + "selections": [ + (v3/*: any*/), + (v4/*: any*/), + (v5/*: any*/), + (v6/*: any*/) + ], + "type": "Span", + "abstractKey": null + } + ], + "storageKey": null + } + ], + "type": "Query", + "abstractKey": null + }, + "kind": "Request", + "operation": { + "argumentDefinitions": (v0/*: any*/), + "kind": "Operation", + "name": "spanPlaygroundPageLoaderQuery", + "selections": [ + { + "alias": "span", + "args": (v1/*: any*/), + "concreteType": null, + "kind": "LinkedField", + "name": "node", + "plural": false, + "selections": [ + (v2/*: any*/), + { + "kind": "TypeDiscriminator", + "abstractKey": "__isNode" + }, + (v3/*: any*/), + { + "kind": "InlineFragment", + "selections": [ + (v4/*: any*/), + (v5/*: any*/), + (v6/*: any*/) + ], + "type": "Span", + "abstractKey": null + } + ], + "storageKey": null + } + ] + }, + "params": { + "cacheID": "d1df608d16a0a1d1451bcbac7dcfc853", + "id": null, + "metadata": {}, + "name": "spanPlaygroundPageLoaderQuery", + "operationKind": "query", + "text": "query spanPlaygroundPageLoaderQuery(\n $spanId: GlobalID!\n) {\n span: node(id: $spanId) {\n __typename\n ... 
on Span {\n id\n project {\n id\n name\n }\n context {\n spanId\n traceId\n }\n attributes\n }\n __isNode: __typename\n id\n }\n}\n" + } +}; +})(); + +(node as any).hash = "d2fd0049ebec80d3b18827b327a91319"; + +export default node; diff --git a/app/src/pages/playground/__tests__/fixtures.ts b/app/src/pages/playground/__tests__/fixtures.ts new file mode 100644 index 0000000000..bf647d9dc0 --- /dev/null +++ b/app/src/pages/playground/__tests__/fixtures.ts @@ -0,0 +1,46 @@ +import { PlaygroundSpan } from "../spanPlaygroundPageLoader"; + +export const basePlaygroundSpan: PlaygroundSpan = { + __typename: "Span", + id: "fake-id", + context: { + traceId: "test", + spanId: "test", + }, + project: { + id: "test", + name: "test", + }, + attributes: "", +}; +export const spanAttributesWithInputMessages = { + llm: { + output_messages: [ + { + message: { + content: "This is an AI Answer", + role: "assistant", + }, + }, + ], + model_name: "gpt-3.5-turbo", + token_count: { completion: 9.0, prompt: 1881.0, total: 1890.0 }, + input_messages: [ + { + message: { + content: "You are a chatbot", + role: "system", + }, + }, + { + message: { + content: "hello?", + role: "user", + }, + }, + ], + invocation_parameters: + '{"context_window": 16384, "num_output": -1, "is_chat_model": true, "is_function_calling_model": true, "model_name": "gpt-3.5-turbo"}', + }, + openinference: { span: { kind: "LLM" } }, +} as const; diff --git a/app/src/pages/playground/__tests__/playgroundUtils.test.ts b/app/src/pages/playground/__tests__/playgroundUtils.test.ts new file mode 100644 index 0000000000..2f505c0a19 --- /dev/null +++ b/app/src/pages/playground/__tests__/playgroundUtils.test.ts @@ -0,0 +1,334 @@ +import { DEFAULT_MODEL_PROVIDER } from "@phoenix/constants/generativeConstants"; +import { + _resetInstanceId, + _resetMessageId, + PlaygroundInstance, +} from "@phoenix/store"; + +import { + INPUT_MESSAGES_PARSING_ERROR, + MODEL_NAME_PARSING_ERROR, + OUTPUT_MESSAGES_PARSING_ERROR, + OUTPUT_VALUE_PARSING_ERROR, + SPAN_ATTRIBUTES_PARSING_ERROR, +} from "../constants"; +import { + getChatRole, + getModelProviderFromModelName, + transformSpanAttributesToPlaygroundInstance, +} from "../playgroundUtils"; + +import { + basePlaygroundSpan, + spanAttributesWithInputMessages, +} from "./fixtures"; + +const expectedPlaygroundInstanceWithIO: PlaygroundInstance = { + id: 0, + activeRunId: null, + isRunning: false, + model: { + provider: "OPENAI", + modelName: "gpt-3.5-turbo", + }, + input: { variableKeys: [], variablesValueCache: {} }, + tools: [], + toolChoice: undefined, + template: { + __type: "chat", + // These id's are not 0, 1, 2, because we create a playground instance (including messages) at the top of the transformSpanAttributesToPlaygroundInstance function + // Doing so increments the message id counter + messages: [ + { id: 2, content: "You are a chatbot", role: "system" }, + { id: 3, content: "hello?", role: "user" }, + ], + }, + output: [{ id: 4, content: "This is an AI Answer", role: "ai" }], +}; + +const defaultTemplate = { + __type: "chat", + messages: [ + { + id: 0, + role: "system", + content: "You are a chatbot", + }, + { + id: 1, + role: "user", + content: "{{question}}", + }, + ], +}; + +describe("transformSpanAttributesToPlaygroundInstance", () => { + beforeEach(() => { + _resetInstanceId(); + _resetMessageId(); + }); + it("should return the default instance with parsing errors if the span attributes are unparsable", () => { + const span = { + ...basePlaygroundSpan, + attributes: "invalid json", + }; + 
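      // The assertion below exercises the fallback path of
      // transformSpanAttributesToPlaygroundInstance: when span.attributes is not
      // valid JSON, the result is the createPlaygroundInstance() defaults plus a
      // single SPAN_ATTRIBUTES_PARSING_ERROR entry in parsingErrors.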
expect(transformSpanAttributesToPlaygroundInstance(span)).toStrictEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + model: { + provider: "OPENAI", + modelName: "gpt-4o", + }, + template: defaultTemplate, + output: undefined, + }, + parsingErrors: [SPAN_ATTRIBUTES_PARSING_ERROR], + }); + }); + + it("should return the default instance with parsing errors if the attributes don't contain any information", () => { + const span = { + ...basePlaygroundSpan, + attributes: JSON.stringify({}), + }; + expect(transformSpanAttributesToPlaygroundInstance(span)).toStrictEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + model: { + provider: "OPENAI", + modelName: "gpt-4o", + }, + template: defaultTemplate, + + output: undefined, + }, + parsingErrors: [ + INPUT_MESSAGES_PARSING_ERROR, + OUTPUT_MESSAGES_PARSING_ERROR, + OUTPUT_VALUE_PARSING_ERROR, + MODEL_NAME_PARSING_ERROR, + ], + }); + }); + + it("should return a PlaygroundInstance with template messages and output parsing errors if the attributes contain llm.input_messages", () => { + const span = { + ...basePlaygroundSpan, + attributes: JSON.stringify({ + ...spanAttributesWithInputMessages, + llm: { + ...spanAttributesWithInputMessages.llm, + output_messages: undefined, + }, + }), + }; + expect(transformSpanAttributesToPlaygroundInstance(span)).toEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + output: undefined, + }, + parsingErrors: [ + OUTPUT_MESSAGES_PARSING_ERROR, + OUTPUT_VALUE_PARSING_ERROR, + ], + }); + }); + + it("should fallback to output.value if output_messages is not present", () => { + const span = { + ...basePlaygroundSpan, + attributes: JSON.stringify({ + ...spanAttributesWithInputMessages, + llm: { + ...spanAttributesWithInputMessages.llm, + output_messages: undefined, + }, + output: { + value: "This is an AI Answer", + }, + }), + }; + + expect(transformSpanAttributesToPlaygroundInstance(span)).toEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + + output: "This is an AI Answer", + }, + parsingErrors: [OUTPUT_MESSAGES_PARSING_ERROR], + }); + }); + + it("should return a PlaygroundInstance if the attributes contain llm.input_messages and output_messages", () => { + const span = { + ...basePlaygroundSpan, + attributes: JSON.stringify(spanAttributesWithInputMessages), + }; + expect(transformSpanAttributesToPlaygroundInstance(span)).toEqual({ + playgroundInstance: expectedPlaygroundInstanceWithIO, + parsingErrors: [], + }); + }); + + it("should normalize message roles in input and output messages", () => { + const span = { + ...basePlaygroundSpan, + attributes: JSON.stringify({ + llm: { + model_name: "gpt-4o", + input_messages: [ + { + message: { + role: "human", + content: "You are a chatbot", + }, + }, + ], + output_messages: [ + { + message: { + role: "assistant", + content: "This is an AI Answer", + }, + }, + ], + }, + }), + }; + expect(transformSpanAttributesToPlaygroundInstance(span)).toEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + model: { + provider: "OPENAI", + modelName: "gpt-4o", + }, + template: { + __type: "chat", + messages: [ + { + id: 2, + role: "user", + content: "You are a chatbot", + }, + ], + }, + output: [{ id: 3, content: "This is an AI Answer", role: "ai" }], + }, + parsingErrors: [], + }); + }); + + it("should correctly parse the model name and infer the provider", () => { + const openAiAttributes = JSON.stringify({ + ...spanAttributesWithInputMessages, + llm: { + 
...spanAttributesWithInputMessages.llm, + model_name: "gpt-3.5-turbo", + }, + }); + const anthropicAttributes = JSON.stringify({ + ...spanAttributesWithInputMessages, + llm: { + ...spanAttributesWithInputMessages.llm, + model_name: "claude-3-5-sonnet-20240620", + }, + }); + const unknownAttributes = JSON.stringify({ + ...spanAttributesWithInputMessages, + llm: { + ...spanAttributesWithInputMessages.llm, + model_name: "test-my-deployment", + }, + }); + + expect( + transformSpanAttributesToPlaygroundInstance({ + ...basePlaygroundSpan, + attributes: openAiAttributes, + }) + ).toEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + model: { + provider: "OPENAI", + modelName: "gpt-3.5-turbo", + }, + }, + parsingErrors: [], + }); + + _resetMessageId(); + _resetInstanceId(); + + expect( + transformSpanAttributesToPlaygroundInstance({ + ...basePlaygroundSpan, + attributes: anthropicAttributes, + }) + ).toEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + model: { + provider: "ANTHROPIC", + modelName: "claude-3-5-sonnet-20240620", + }, + }, + parsingErrors: [], + }); + + _resetMessageId(); + _resetInstanceId(); + + expect( + transformSpanAttributesToPlaygroundInstance({ + ...basePlaygroundSpan, + attributes: unknownAttributes, + }) + ).toEqual({ + playgroundInstance: { + ...expectedPlaygroundInstanceWithIO, + model: { + provider: DEFAULT_MODEL_PROVIDER, + modelName: "test-my-deployment", + }, + }, + parsingErrors: [], + }); + }); +}); + +describe("getChatRole", () => { + it("should return the role if it is a valid ChatMessageRole", () => { + expect(getChatRole("user")).toEqual("user"); + }); + + it("should return the ChatMessageRole if the role is included in ChatRoleMap", () => { + expect(getChatRole("assistant")).toEqual("ai"); + expect(getChatRole("bot")).toEqual("ai"); + expect(getChatRole("system")).toEqual("system"); + expect(getChatRole("human:")).toEqual("user"); + }); + + it("should return DEFAULT_CHAT_ROLE if the role is not found", () => { + expect(getChatRole("invalid")).toEqual("user"); + }); +}); + +describe("getModelProviderFromModelName", () => { + it("should return OPENAI if the model name includes 'gpt' or 'o1'", () => { + expect(getModelProviderFromModelName("gpt-3.5-turbo")).toEqual("OPENAI"); + expect(getModelProviderFromModelName("o1")).toEqual("OPENAI"); + }); + + it("should return ANTHROPIC if the model name includes 'claude'", () => { + expect(getModelProviderFromModelName("claude-3-5-sonnet-20240620")).toEqual( + "ANTHROPIC" + ); + }); + + it(`should return ${DEFAULT_MODEL_PROVIDER} if the model name does not match any known models`, () => { + expect(getModelProviderFromModelName("test-my-model")).toEqual( + DEFAULT_MODEL_PROVIDER + ); + }); +}); diff --git a/app/src/pages/playground/constants.tsx b/app/src/pages/playground/constants.tsx new file mode 100644 index 0000000000..90be404948 --- /dev/null +++ b/app/src/pages/playground/constants.tsx @@ -0,0 +1,32 @@ +export const NUM_MAX_PLAYGROUND_INSTANCES = 4; + +/** + * Map of {@link ChatMessageRole} to potential role values. + * Used to map roles to a canonical role. 
+ */ +export const ChatRoleMap: Record = { + user: ["user", "human"], + ai: ["assistant", "bot", "ai"], + system: ["system"], + tool: ["tool"], +}; + +/** + * Parsing errors for parsing a span to a playground instance + */ +export const INPUT_MESSAGES_PARSING_ERROR = + "Unable to parse span input messages, expected messages which include a role and content."; +export const OUTPUT_MESSAGES_PARSING_ERROR = + "Unable to parse span output messages, expected messages which include a role and content."; +export const OUTPUT_VALUE_PARSING_ERROR = + "Unable to parse span output expected output.value to be present."; +export const SPAN_ATTRIBUTES_PARSING_ERROR = + "Unable to parse span attributes, attributes must be valid JSON."; +export const MODEL_NAME_PARSING_ERROR = + "Unable to parse model name, expected llm.model_name to be present."; + +export const modelProviderToModelPrefixMap: Record = { + AZURE_OPENAI: [], + ANTHROPIC: ["claude"], + OPENAI: ["gpt", "o1"], +}; diff --git a/app/src/pages/playground/index.tsx b/app/src/pages/playground/index.tsx index 9bc400653a..ecc7e76730 100644 --- a/app/src/pages/playground/index.tsx +++ b/app/src/pages/playground/index.tsx @@ -1 +1,3 @@ export * from "./PlaygroundPage"; +export * from "./SpanPlaygroundPage"; +export * from "./spanPlaygroundPageLoader"; diff --git a/app/src/pages/playground/playgroundUtils.ts b/app/src/pages/playground/playgroundUtils.ts new file mode 100644 index 0000000000..04f10cf16f --- /dev/null +++ b/app/src/pages/playground/playgroundUtils.ts @@ -0,0 +1,242 @@ +import { + DEFAULT_CHAT_ROLE, + DEFAULT_MODEL_PROVIDER, +} from "@phoenix/constants/generativeConstants"; +import { ModelConfig, PlaygroundInstance } from "@phoenix/store"; +import { + ChatMessage, + createPlaygroundInstance, + generateMessageId, +} from "@phoenix/store"; +import { safelyParseJSON } from "@phoenix/utils/jsonUtils"; + +import { + ChatRoleMap, + INPUT_MESSAGES_PARSING_ERROR, + MODEL_NAME_PARSING_ERROR, + modelProviderToModelPrefixMap, + OUTPUT_MESSAGES_PARSING_ERROR, + OUTPUT_VALUE_PARSING_ERROR, + SPAN_ATTRIBUTES_PARSING_ERROR, +} from "./constants"; +import { + chatMessageRolesSchema, + chatMessagesSchema, + llmInputMessageSchema, + llmOutputMessageSchema, + MessageSchema, + modelNameSchema, + outputSchema, +} from "./schemas"; +import { PlaygroundSpan } from "./spanPlaygroundPageLoader"; + +/** + * Checks if a string is a valid chat message role + */ +export function isChatMessageRole(role: unknown): role is ChatMessageRole { + return chatMessageRolesSchema.safeParse(role).success; +} + +/** + * Takes a string role and attempts to map the role to a valid ChatMessageRole. + * If the role is not found, it will default to {@link DEFAULT_CHAT_ROLE}. 
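 * @example
 * // behavior per ChatRoleMap and the accompanying unit tests
 * getChatRole("user")      // => "user" (already a valid ChatMessageRole)
 * getChatRole("assistant") // => "ai"   (normalized via ChatRoleMap)
 * getChatRole("human")     // => "user"
 * getChatRole("invalid")   // => DEFAULT_CHAT_ROLE ("user" per the tests)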
+ * @param role the role to map + * @returns ChatMessageRole + * + * NB: Only exported for testing + */ +export function getChatRole(role: string): ChatMessageRole { + if (isChatMessageRole(role)) { + return role; + } + + for (const [chatRole, acceptedValues] of Object.entries(ChatRoleMap)) { + if (acceptedValues.includes(role)) { + return chatRole as ChatMessageRole; + } + } + return DEFAULT_CHAT_ROLE; +} + +/** + * Takes a list of messages from span attributes and transforms them into a list of {@link ChatMessage|ChatMessages} + * @param messages messages from attributes either input or output @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions}} + * returns a list of {@link ChatMessage|ChatMessages} + */ +function processAttributeMessagesToChatMessage( + messages: MessageSchema[] +): ChatMessage[] { + return messages.map(({ message }) => { + return { + id: generateMessageId(), + role: getChatRole(message.role), + content: message.content, + }; + }); +} + +/** + * Attempts to parse the input messages from the span attributes. + * @param parsedAttributes the JSON parsed span attributes + * @returns an object containing the parsed {@link ChatMessage|ChatMessages} and any parsing errors + */ +function getTemplateMessagesFromAttributes(parsedAttributes: unknown) { + const inputMessages = llmInputMessageSchema.safeParse(parsedAttributes); + if (!inputMessages.success) { + return { + messageParsingErrors: [INPUT_MESSAGES_PARSING_ERROR], + messages: null, + }; + } + + return { + messageParsingErrors: [], + messages: processAttributeMessagesToChatMessage( + inputMessages.data.llm.input_messages + ), + }; +} + +/** + * Attempts to get llm.output_messages then output.value from the span attributes. + * @param parsedAttributes the JSON parsed span attributes + * @returns an object containing the parsed output and any parsing errors + */ +function getOutputFromAttributes(parsedAttributes: unknown) { + const outputParsingErrors: string[] = []; + const outputMessages = llmOutputMessageSchema.safeParse(parsedAttributes); + if (outputMessages.success) { + return { + output: processAttributeMessagesToChatMessage( + outputMessages.data.llm.output_messages + ), + outputParsingErrors, + }; + } + + outputParsingErrors.push(OUTPUT_MESSAGES_PARSING_ERROR); + + const parsedOutput = outputSchema.safeParse(parsedAttributes); + if (parsedOutput.success) { + return { + output: parsedOutput.data.output.value, + outputParsingErrors, + }; + } + + outputParsingErrors.push(OUTPUT_VALUE_PARSING_ERROR); + + return { + output: undefined, + outputParsingErrors, + }; +} + +/** + * Attempts to infer the provider of the model from the model name. + * @param modelName the model name to get the provider from + * @returns the provider of the model defaulting to {@link DEFAULT_MODEL_PROVIDER} if the provider cannot be inferred + * + * NB: Only exported for testing + */ +export function getModelProviderFromModelName( + modelName: string +): ModelProvider { + for (const provider of Object.keys(modelProviderToModelPrefixMap)) { + const prefixes = modelProviderToModelPrefixMap[provider as ModelProvider]; + if (prefixes.some((prefix) => modelName.includes(prefix))) { + return provider as ModelProvider; + } + } + return DEFAULT_MODEL_PROVIDER; +} + +/** + * Attempts to get the llm.model_name and inferred provider from the span attributes. 
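 * @example
 * // illustrative shapes only; the attribute values are not from a real span
 * getModelConfigFromAttributes({ llm: { model_name: "gpt-4o" } })
 * //   => { modelConfig: { modelName: "gpt-4o", provider: "OPENAI" }, parsingErrors: [] }
 * getModelConfigFromAttributes({})
 * //   => { modelConfig: null, parsingErrors: [MODEL_NAME_PARSING_ERROR] }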
+ * @param parsedAttributes the JSON parsed span attributes + * @returns the model config if it exists or parsing errors if it does not + */ +function getModelConfigFromAttributes( + parsedAttributes: unknown +): + | { modelConfig: ModelConfig; parsingErrors: never[] } + | { modelConfig: null; parsingErrors: string[] } { + const { success, data } = modelNameSchema.safeParse(parsedAttributes); + if (success) { + return { + modelConfig: { + modelName: data.llm.model_name, + provider: getModelProviderFromModelName(data.llm.model_name), + }, + parsingErrors: [], + }; + } + return { modelConfig: null, parsingErrors: [MODEL_NAME_PARSING_ERROR] }; +} + +/** + * Takes a {@link PlaygroundSpan|Span} and attempts to transform it's attributes into various fields on a {@link PlaygroundInstance}. + * @param span the {@link PlaygroundSpan|Span} to transform into a playground instance + * @returns a {@link PlaygroundInstance} with certain fields pre-populated from the span attributes + */ +export function transformSpanAttributesToPlaygroundInstance( + span: PlaygroundSpan +): { + playgroundInstance: PlaygroundInstance; + /** + * Errors that occurred during parsing of initial playground data. + * For example, when coming from a span to the playground, the span may + * not have the correct attributes, or the attributes may be of the wrong shape. + * This field is used to store any issues encountered when parsing to display in the playground. + */ + parsingErrors: string[]; +} { + const basePlaygroundInstance = createPlaygroundInstance(); + const { json: parsedAttributes, parseError } = safelyParseJSON( + span.attributes + ); + if (parseError) { + return { + playgroundInstance: basePlaygroundInstance, + parsingErrors: [SPAN_ATTRIBUTES_PARSING_ERROR], + }; + } + + const { messages, messageParsingErrors } = + getTemplateMessagesFromAttributes(parsedAttributes); + const { output, outputParsingErrors } = + getOutputFromAttributes(parsedAttributes); + const { modelConfig, parsingErrors: modelConfigParsingErrors } = + getModelConfigFromAttributes(parsedAttributes); + + // TODO(parker): add support for tools, variables, and input / output variants + // https://github.com/Arize-ai/phoenix/issues/4886 + return { + playgroundInstance: { + ...basePlaygroundInstance, + model: modelConfig ?? basePlaygroundInstance.model, + template: + messages != null + ? 
{ + __type: "chat", + messages, + } + : basePlaygroundInstance.template, + output, + }, + parsingErrors: [ + ...messageParsingErrors, + ...outputParsingErrors, + ...modelConfigParsingErrors, + ], + }; +} + +/** + * Checks if something is a valid {@link ChatMessage} + */ +export const isChatMessages = ( + messages: unknown +): messages is ChatMessage[] => { + return chatMessagesSchema.safeParse(messages).success; +}; diff --git a/app/src/pages/playground/schemas.ts b/app/src/pages/playground/schemas.ts new file mode 100644 index 0000000000..cb650ab9cf --- /dev/null +++ b/app/src/pages/playground/schemas.ts @@ -0,0 +1,103 @@ +import { z } from "zod"; + +import { + LLMAttributePostfixes, + MessageAttributePostfixes, + SemanticAttributePrefixes, +} from "@arizeai/openinference-semantic-conventions"; + +import { ChatMessage } from "@phoenix/store"; +import { schemaForType } from "@phoenix/typeUtils"; + +/** + * The zod schema for llm tool calls in an input message + * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions} + */ +const toolCallSchema = z + .object({ + function: z + .object({ + name: z.string(), + arguments: z.string(), + }) + .partial(), + }) + .partial(); + +/** + * The zod schema for llm messages + * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions} + */ +const messageSchema = z.object({ + [SemanticAttributePrefixes.message]: z.object({ + [MessageAttributePostfixes.role]: z.string(), + [MessageAttributePostfixes.content]: z.string(), + [MessageAttributePostfixes.tool_calls]: z.array(toolCallSchema).optional(), + }), +}); + +/** + * The type of each message in either the input or output messages + * on a spans attributes + */ +export type MessageSchema = z.infer; + +/** + * The zod schema for llm.input_messages attributes + * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions} + */ +export const llmInputMessageSchema = z.object({ + [SemanticAttributePrefixes.llm]: z.object({ + [LLMAttributePostfixes.input_messages]: z.array(messageSchema), + }), +}); + +/** + * The zod schema for llm.output_messages attributes + * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions} + */ +export const llmOutputMessageSchema = z.object({ + [SemanticAttributePrefixes.llm]: z.object({ + [LLMAttributePostfixes.output_messages]: z.array(messageSchema), + }), +}); + +/** + * The zod schema for output attributes + * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions} + */ +export const outputSchema = z.object({ + [SemanticAttributePrefixes.output]: z.object({ + value: z.string(), + }), +}); + +/** + * The zod schema for {@link chatMessageRoles} + */ +export const chatMessageRolesSchema = schemaForType()( + z.enum(["user", "ai", "system", "tool"]) +); + +const chatMessageSchema = schemaForType()( + z.object({ + id: z.number(), + role: chatMessageRolesSchema, + content: z.string(), + }) +); + +/** + * The zod schema for ChatMessages + */ +export const chatMessagesSchema = z.array(chatMessageSchema); + +/** + * The zod schema for llm model name + * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions} + */ +export const modelNameSchema = z.object({ + [SemanticAttributePrefixes.llm]: z.object({ + [LLMAttributePostfixes.model_name]: 
z.string(), + }), +}); diff --git a/app/src/pages/playground/spanPlaygroundPageLoader.ts b/app/src/pages/playground/spanPlaygroundPageLoader.ts new file mode 100644 index 0000000000..58a0cdd234 --- /dev/null +++ b/app/src/pages/playground/spanPlaygroundPageLoader.ts @@ -0,0 +1,51 @@ +import { fetchQuery, graphql } from "react-relay"; +import { LoaderFunctionArgs } from "react-router"; + +import RelayEnvironment from "@phoenix/RelayEnvironment"; + +import { + spanPlaygroundPageLoaderQuery, + spanPlaygroundPageLoaderQuery$data, +} from "./__generated__/spanPlaygroundPageLoaderQuery.graphql"; + +/** + * The type of a span that is fetched to pre-populate the playground. + * This span gets fetched when navigating from a span to the playground, used for span replay. + */ +export type PlaygroundSpan = Extract< + spanPlaygroundPageLoaderQuery$data["span"], + { __typename: "Span" } +>; + +export async function spanPlaygroundPageLoader(args: LoaderFunctionArgs) { + const { spanId } = args.params; + if (!spanId || typeof spanId !== "string") { + throw new Error("Invalid spanId"); + } + const loaderData = await fetchQuery( + RelayEnvironment, + graphql` + query spanPlaygroundPageLoaderQuery($spanId: GlobalID!) { + span: node(id: $spanId) { + __typename + ... on Span { + id + project { + id + name + } + context { + spanId + traceId + } + attributes + } + } + } + `, + { + spanId, + } + ).toPromise(); + return loaderData; +} diff --git a/app/src/pages/playground/types.ts b/app/src/pages/playground/types.ts new file mode 100644 index 0000000000..dc3b097f5e --- /dev/null +++ b/app/src/pages/playground/types.ts @@ -0,0 +1,7 @@ +export interface PlaygroundInstanceProps { + /** + * Multiple playground instances are supported. + * The id is used to identify the instance. + */ + playgroundInstanceId: number; +} diff --git a/app/src/pages/settings/SettingsPage.tsx b/app/src/pages/settings/SettingsPage.tsx index 7cf8da797b..7abc20eecb 100644 --- a/app/src/pages/settings/SettingsPage.tsx +++ b/app/src/pages/settings/SettingsPage.tsx @@ -82,7 +82,7 @@ export function SettingsPage() { function CopyToClipboardButtonWithPadding(props: { text: string }) { return ( - + ); } diff --git a/app/src/pages/trace/SpanCodeDropdown.tsx b/app/src/pages/trace/SpanCodeDropdown.tsx index a7de027a05..e18129acb0 100644 --- a/app/src/pages/trace/SpanCodeDropdown.tsx +++ b/app/src/pages/trace/SpanCodeDropdown.tsx @@ -45,11 +45,11 @@ export function SpanCodeDropdown(props: SpanCodeDropdownProps) {
    - + - +
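For orientation, here is a minimal sketch of how the span-replay utilities above fit together. The span payload is invented for illustration and only needs to satisfy the zod schemas in `schemas.ts`; the cast elides the project/context fields that `spanPlaygroundPageLoader` also fetches.

```typescript
import { transformSpanAttributesToPlaygroundInstance } from "./playgroundUtils";
import type { PlaygroundSpan } from "./spanPlaygroundPageLoader";

// A made-up span whose attributes follow the OpenInference conventions parsed above.
const span = {
  attributes: JSON.stringify({
    llm: {
      model_name: "gpt-4o",
      input_messages: [
        { message: { role: "system", content: "You are a chatbot" } },
        { message: { role: "user", content: "{{question}}" } },
      ],
      output_messages: [
        { message: { role: "assistant", content: "Hello! How can I help?" } },
      ],
    },
  }),
} as PlaygroundSpan;

const { playgroundInstance, parsingErrors } =
  transformSpanAttributesToPlaygroundInstance(span);
// playgroundInstance.template is a chat template whose roles are mapped via getChatRole
// ("assistant" -> "ai"), playgroundInstance.model is { modelName: "gpt-4o", provider: "OPENAI" },
// and parsingErrors is empty because every schema parsed successfully.
```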
    diff --git a/app/src/pages/trace/SpanDetails.tsx b/app/src/pages/trace/SpanDetails.tsx index 981d2b6280..6da594418a 100644 --- a/app/src/pages/trace/SpanDetails.tsx +++ b/app/src/pages/trace/SpanDetails.tsx @@ -40,7 +40,6 @@ import { TooltipTrigger, View, ViewProps, - ViewStyleProps, } from "@arizeai/components"; import { DocumentAttributePostfixes, @@ -63,7 +62,9 @@ import { import { SpanKindIcon } from "@phoenix/components/trace"; import { SpanKindLabel } from "@phoenix/components/trace/SpanKindLabel"; import { useNotifySuccess, useTheme } from "@phoenix/contexts"; +import { useFeatureFlag } from "@phoenix/contexts/FeatureFlagsContext"; import { usePreferencesContext } from "@phoenix/contexts/PreferencesContext"; +import { useChatMessageStyles } from "@phoenix/hooks/useChatMessageStyles"; import { AttributeDocument, AttributeEmbedding, @@ -79,6 +80,7 @@ import { isAttributeMessages, } from "@phoenix/openInference/tracing/types"; import { assertUnreachable, isStringArray } from "@phoenix/typeUtils"; +import { safelyParseJSON } from "@phoenix/utils/jsonUtils"; import { formatFloat, numberFormatter } from "@phoenix/utils/numberFormatUtils"; import { RetrievalEvaluationLabel } from "../project/RetrievalEvaluationLabel"; @@ -117,11 +119,7 @@ const useSafelyParsedJSON = ( jsonStr: string ): { json: { [key: string]: unknown } | null; parseError?: unknown } => { return useMemo(() => { - try { - return { json: JSON.parse(jsonStr) }; - } catch (e) { - return { json: null, parseError: e }; - } + return safelyParseJSON(jsonStr); }, [jsonStr]); }; @@ -149,6 +147,8 @@ export function SpanDetails({ spanNodeId: string; projectId: string; }) { + const isPromptPlaygroundEnabled = useFeatureFlag("playground"); + const navigate = useNavigate(); const { span } = useLazyLoadQuery( graphql` query SpanDetailsQuery($spanId: GlobalID!) { @@ -245,6 +245,18 @@ export function SpanDetails({ {span.name} + {isPromptPlaygroundEnabled ? 
( + + ) : null} @@ -1159,7 +1170,6 @@ function DocumentItem({ bodyStyle={{ padding: 0, }} - // @ts-expect-error force putting the title in as a string title={ } /> @@ -1282,33 +1292,7 @@ function LLMMessage({ message }: { message: AttributeMessage }) { message[MessageAttributePostfixes.function_call_arguments_json] && message[MessageAttributePostfixes.function_call_name]; const role = message[MessageAttributePostfixes.role] || "unknown"; - const messageStyles = useMemo(() => { - if (role === "user") { - return { - backgroundColor: "grey-100", - borderColor: "grey-500", - }; - } else if (role === "assistant") { - return { - backgroundColor: "blue-100", - borderColor: "blue-700", - }; - } else if (role === "system") { - return { - backgroundColor: "indigo-100", - borderColor: "indigo-700", - }; - } else if (["function", "tool"].includes(role)) { - return { - backgroundColor: "yellow-100", - borderColor: "yellow-700", - }; - } - return { - backgroundColor: "grey-100", - borderColor: "grey-700", - }; - }, [role]); + const messageStyles = useChatMessageStyles(role); return ( @@ -1405,7 +1389,6 @@ function LLMToolSchema({ return ( #{index + 1}} {...defaultCardProps} diff --git a/app/src/schemas/index.ts b/app/src/schemas/index.ts new file mode 100644 index 0000000000..c446331ef4 --- /dev/null +++ b/app/src/schemas/index.ts @@ -0,0 +1 @@ +export * from "./toolSchema"; diff --git a/app/src/schemas/toolSchema.ts b/app/src/schemas/toolSchema.ts new file mode 100644 index 0000000000..91388965ec --- /dev/null +++ b/app/src/schemas/toolSchema.ts @@ -0,0 +1,87 @@ +import { z } from "zod"; +import zodToJsonSchema from "zod-to-json-schema"; + +/** + * The schema for a tool definition + * @see https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + * + * Note: The nested passThrough's are used to allow for extra keys in JSON schema, however, they do not actually + * allow for extra keys when the zod schema is used for parsing. 
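+ *
+ * @example A minimal function definition that parses against this schema (illustrative only):
+ * {
+ *   "type": "function",
+ *   "function": {
+ *     "name": "get_current_weather",
+ *     "description": "Look up the current weather for a city",
+ *     "parameters": {
+ *       "type": "object",
+ *       "properties": { "city": { "type": "string" } },
+ *       "required": ["city"]
+ *     }
+ *   }
+ * }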
+ */ +export const toolSchema = z + .object({ + type: z.literal("function").describe("The type of the tool"), + function: z + .object({ + name: z.string().describe("The name of the function"), + description: z + .string() + .optional() + .describe("A description of the function"), + parameters: z + .object({ + type: z.literal("object"), + properties: z + .record( + z + .object({ + type: z + .enum([ + "string", + "number", + "boolean", + "object", + "array", + "null", + "integer", + ]) + .describe("The type of the parameter"), + description: z + .string() + .optional() + .describe("A description of the parameter"), + enum: z + .array(z.string()) + .optional() + .describe("The allowed values"), + }) + .passthrough() + ) + .describe("A map of parameter names to their definitions"), + required: z + .array(z.string()) + .optional() + .describe("The required parameters"), + additionalProperties: z + .boolean() + .optional() + .describe( + "Whether or not additional properties are allowed in the schema" + ), + strict: z + .boolean() + .optional() + .describe( + "Whether or not the arguments should exactly match the function definition, only supported for OpenAI models" + ), + }) + .passthrough() + .describe("The parameters that the function accepts"), + }) + .passthrough() + .describe("The function definition"), + }) + .passthrough(); + +/** + * The type of a tool definition + * @see https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + */ +export type ToolDefinition = z.infer; + +/** + * The JSON schema for a tool definition + */ +export const toolJSONSchema = zodToJsonSchema(toolSchema, { + removeAdditionalStrategy: "passthrough", +}); diff --git a/app/src/store/credentialsStore.tsx b/app/src/store/credentialsStore.tsx new file mode 100644 index 0000000000..42ae3e259a --- /dev/null +++ b/app/src/store/credentialsStore.tsx @@ -0,0 +1,31 @@ +import { create, StateCreator } from "zustand"; +import { devtools, persist } from "zustand/middleware"; + +export type CredentialsProps = Partial>; + +export interface CredentialsState extends CredentialsProps { + /** + * Setter for a given credential + * @param credential the name of the credential to set + * @param value the value of the credential + */ + setCredential: (params: { provider: ModelProvider; value: string }) => void; +} + +export const createCredentialsStore = ( + initialProps: Partial +) => { + const credentialsStore: StateCreator = (set) => ({ + setCredential: ({ provider, value }) => { + set({ [provider]: value }); + }, + ...initialProps, + }); + return create()( + persist(devtools(credentialsStore), { + name: "arize-phoenix-credentials", + }) + ); +}; + +export type CredentialsStore = ReturnType; diff --git a/app/src/store/index.tsx b/app/src/store/index.tsx index 98fbe30497..13b6d059a0 100644 --- a/app/src/store/index.tsx +++ b/app/src/store/index.tsx @@ -1,3 +1,4 @@ export * from "./pointCloudStore"; export * from "./tracingStore"; -export * from "./playgroundStore"; +export * from "./playground"; +export * from "./credentialsStore"; diff --git a/app/src/store/playground/index.ts b/app/src/store/playground/index.ts new file mode 100644 index 0000000000..a26b3b3411 --- /dev/null +++ b/app/src/store/playground/index.ts @@ -0,0 +1,2 @@ +export * from "./playgroundStore"; +export * from "./types"; diff --git a/app/src/store/playground/playgroundStore.tsx b/app/src/store/playground/playgroundStore.tsx new file mode 100644 index 0000000000..88a437a2c6 --- /dev/null +++ b/app/src/store/playground/playgroundStore.tsx @@ 
-0,0 +1,388 @@ +import { create, StateCreator } from "zustand"; +import { devtools } from "zustand/middleware"; + +import { TemplateLanguages } from "@phoenix/components/templateEditor/constants"; +import { getTemplateLanguageUtils } from "@phoenix/components/templateEditor/templateEditorUtils"; +import { TemplateLanguage } from "@phoenix/components/templateEditor/types"; +import { + DEFAULT_CHAT_ROLE, + DEFAULT_MODEL_PROVIDER, +} from "@phoenix/constants/generativeConstants"; +import { assertUnreachable } from "@phoenix/typeUtils"; + +import { + GenAIOperationType, + InitialPlaygroundState, + isManualInput, + PlaygroundChatTemplate, + PlaygroundInputMode, + PlaygroundInstance, + PlaygroundState, + PlaygroundTextCompletionTemplate, + Tool, +} from "./types"; + +let playgroundInstanceId = 0; +let playgroundRunId = 0; +// This value must be truthy in order for message re-ordering to work +let playgroundMessageId = 1; +let playgroundToolId = 0; + +/** + * Generates a new playground instance ID + */ +export const generateInstanceId = () => playgroundInstanceId++; + +/** + * Generates a new playground message ID + */ +export const generateMessageId = () => playgroundMessageId++; + +/** + * Generates a new playground tool ID + */ +export const generateToolId = () => playgroundToolId++; + +/** + * Resets the playground instance ID to 0 + * + * NB: This is only used for testing purposes + */ +export const _resetInstanceId = () => { + playgroundInstanceId = 0; +}; + +/** + * Resets the playground message ID to 0 + * + * NB: This is only used for testing purposes + */ +export const _resetMessageId = () => { + playgroundMessageId = 0; +}; + +/** + * Resets the playground tool ID to 0 + * + * NB: This is only used for testing purposes + */ +export const _resetToolId = () => { + playgroundToolId = 0; +}; + +const generateChatCompletionTemplate = (): PlaygroundChatTemplate => ({ + __type: "chat", + messages: [ + { + id: generateMessageId(), + role: "system", + content: "You are a chatbot", + }, + { + id: generateMessageId(), + role: "user", + content: "{{question}}", + }, + ], +}); + +const DEFAULT_TEXT_COMPLETION_TEMPLATE: PlaygroundTextCompletionTemplate = { + __type: "text_completion", + prompt: "{{question}}", +}; + +export function createPlaygroundInstance(): PlaygroundInstance { + return { + id: generateInstanceId(), + template: generateChatCompletionTemplate(), + model: { provider: DEFAULT_MODEL_PROVIDER, modelName: "gpt-4o" }, + tools: [], + toolChoice: undefined, + // TODO(apowell) - use datasetId if in dataset mode + input: { variablesValueCache: {}, variableKeys: [] }, + output: undefined, + activeRunId: null, + isRunning: false, + }; +} + +export function createTool(toolNumber: number): Tool { + return { + id: generateToolId(), + definition: { + type: "function", + function: { + name: `new_function_${toolNumber}`, + parameters: { + type: "object", + properties: { + new_arg: { + type: "string", + }, + }, + required: [], + }, + }, + }, + }; +} + +export const createPlaygroundStore = ( + initialProps?: InitialPlaygroundState +) => { + const playgroundStore: StateCreator = (set, get) => ({ + operationType: "chat", + inputMode: "manual", + input: { + // to get a record of visible variables and their values, + // use usePlaygroundContext(selectDerivedInputVariables). do not render variablesValueCache + // directly or users will see stale values. 
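+      // For example, with variableKeys ["question"] and an empty cache,
+      // selectDerivedInputVariables returns { question: "" }, so every detected
+      // variable always renders with a (possibly empty) value.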
+ variablesValueCache: { + question: "", + }, + variableKeys: ["question"], + }, + templateLanguage: TemplateLanguages.Mustache, + setInputMode: (inputMode: PlaygroundInputMode) => set({ inputMode }), + instances: [createPlaygroundInstance()], + setOperationType: (operationType: GenAIOperationType) => { + if (operationType === "chat") { + set({ + instances: get().instances.map((instance) => ({ + ...instance, + template: generateChatCompletionTemplate(), + })), + }); + } else { + set({ + instances: get().instances.map((instance) => ({ + ...instance, + template: DEFAULT_TEXT_COMPLETION_TEMPLATE, + })), + }); + } + set({ operationType }); + }, + addInstance: () => { + const instances = get().instances; + const firstInstance = get().instances[0]; + if (!firstInstance) { + return; + } + set({ + instances: [ + ...instances, + { + ...firstInstance, + id: generateInstanceId(), + activeRunId: null, + }, + ], + }); + }, + updateModel: ({ instanceId, model }) => { + const instances = get().instances; + const instance = instances.find((instance) => instance.id === instanceId); + if (!instance) { + return; + } + const currentModel = instance.model; + if (model.provider !== currentModel.provider) { + // Force clear the model name if the provider changes + model = { + ...model, + modelName: undefined, + }; + } + set({ + instances: instances.map((instance) => { + if (instance.id === instanceId) { + return { + ...instance, + model: { + ...instance.model, + ...model, + }, + }; + } + return instance; + }), + }); + }, + deleteInstance: (instanceId: number) => { + const instances = get().instances; + set({ + instances: instances.filter((instance) => instance.id !== instanceId), + }); + }, + addMessage: ({ playgroundInstanceId }) => { + const instances = get().instances; + + // Update the given instance + set({ + instances: instances.map((instance) => { + if ( + instance.id === playgroundInstanceId && + instance?.template && + instance?.template.__type === "chat" + ) { + return { + ...instance, + messages: [ + ...instance.template.messages, + { role: DEFAULT_CHAT_ROLE, content: "{question}" }, + ], + }; + } + return instance; + }), + }); + }, + updateInstance: ({ instanceId, patch }) => { + const instances = get().instances; + set({ + instances: instances.map((instance) => { + if (instance.id === instanceId) { + return { + ...instance, + ...patch, + }; + } + return instance; + }), + }); + get().calculateVariables(); + }, + runPlaygroundInstances: () => { + const instances = get().instances; + set({ + instances: instances.map((instance) => ({ + ...instance, + activeRunId: playgroundRunId++, + isRunning: true, + })), + }); + }, + runPlaygroundInstance: (instanceId: number) => { + const instances = get().instances; + set({ + instances: instances.map((instance) => { + if (instance.id === instanceId) { + return { + ...instance, + activeRunId: playgroundRunId++, + isRunning: true, + }; + } + return instance; + }), + }); + }, + markPlaygroundInstanceComplete: (instanceId: number) => { + const instances = get().instances; + set({ + instances: instances.map((instance) => { + if (instance.id === instanceId) { + return { + ...instance, + isRunning: false, + }; + } + return instance; + }), + }); + }, + setTemplateLanguage: (templateLanguage: TemplateLanguage) => { + set({ templateLanguage }); + // Re-compute variables when the template language changes + get().calculateVariables(); + }, + calculateVariables: () => { + const instances = get().instances; + const variables = new Set(); + const utils = 
getTemplateLanguageUtils(get().templateLanguage); + instances.forEach((instance) => { + const instanceType = instance.template.__type; + // this double nested loop should be okay since we don't expect more than 4 instances + // and a handful of messages per instance + switch (instanceType) { + case "chat": { + // for each chat message in the instance + instance.template.messages.forEach((message) => { + // extract variables from the message content + const extractedVariables = + message.content == null + ? [] + : utils.extractVariables(message.content); + extractedVariables.forEach((variable) => { + variables.add(variable); + }); + }); + break; + } + case "text_completion": { + const extractedVariables = utils.extractVariables( + instance.template.prompt + ); + extractedVariables.forEach((variable) => { + variables.add(variable); + }); + break; + } + default: { + assertUnreachable(instanceType); + } + } + }); + set({ + input: { ...get().input, variableKeys: [...Array.from(variables)] }, + }); + }, + setVariableValue: (key: string, value: string) => { + const input = get().input; + if (isManualInput(input)) { + set({ + input: { + ...input, + variablesValueCache: { ...input.variablesValueCache, [key]: value }, + }, + }); + } + }, + ...initialProps, + }); + return create(devtools(playgroundStore)); +}; + +export type PlaygroundStore = ReturnType; + +/** + * Selects the variable keys from the playground state + * @param state the playground state + * @returns the variable keys + */ +export const selectInputVariableKeys = (state: PlaygroundState) => { + if (isManualInput(state.input)) { + return state.input.variableKeys; + } + return []; +}; + +/** + * Selects the derived input variables from the playground state + * @param state the playground state + * @returns the derived input variables + */ +export const selectDerivedInputVariables = (state: PlaygroundState) => { + if (isManualInput(state.input)) { + const input = state.input; + const variableKeys = input.variableKeys; + const variablesValueCache = input.variablesValueCache; + const valueMap: Record = {}; + variableKeys.forEach((key) => { + valueMap[key] = variablesValueCache?.[key] || ""; + }); + return valueMap; + } + return {}; +}; diff --git a/app/src/store/playground/types.ts b/app/src/store/playground/types.ts new file mode 100644 index 0000000000..01d6357f82 --- /dev/null +++ b/app/src/store/playground/types.ts @@ -0,0 +1,249 @@ +import { TemplateLanguage } from "@phoenix/components/templateEditor/types"; +import { ToolDefinition } from "@phoenix/schemas"; + +export type GenAIOperationType = "chat" | "text_completion"; +/** + * The input mode for the playground + * @example "manual" or "dataset" + */ +export type PlaygroundInputMode = "manual" | "dataset"; + +/** + * A tool call that invokes a function with JSON arguments + * @example + * ```typescript + * { + * id: "1", + * function: { + * name: "getCurrentWeather", + * arguments: "{ \"city\": \"San Francisco\" }" + * } + * } + * ``` + */ +export type ToolCall = { + id: string; + function: { + name: string; + arguments: string; + }; +}; + +/** + * A chat message with a role and content + * @example { role: "user", content: "What is the weather in San Francisco?" 
} + * @example + * ```typescript + * { + * "role": "assistant", + * "toolCalls": [ + * { + * "id": "1", + * "function": { + * "name": "getCurrentWeather", + * "arguments": "{ \"city\": \"San Francisco\" }" + * } + * } + * ] + * } + * ``` + */ +export type ChatMessage = { + id: number; + role: ChatMessageRole; + content?: string; + toolCalls?: ToolCall[]; +}; + +/** + * A template for a chat completion playground + * Takes a list of messages for multi-turn + * @see https://platform.openai.com/docs/guides/chat-completions + */ +export type PlaygroundChatTemplate = { + __type: "chat"; + messages: ChatMessage[]; +}; + +/** + * A template for a text completion playground + * A single prompt for text completion + */ +export type PlaygroundTextCompletionTemplate = { + __type: "text_completion"; + prompt: string; +}; + +/** + * A playground template can be a chat completion or text completion (legacy) + */ +export type PlaygroundTemplate = + | PlaygroundChatTemplate + | PlaygroundTextCompletionTemplate; + +type DatasetInput = { + datasetId: string; +}; + +type ManualInput = { + variablesValueCache: Record; + variableKeys: string[]; +}; + +type PlaygroundInput = DatasetInput | ManualInput; + +export type ModelConfig = { + provider: ModelProvider; + modelName: string | null; +}; + +/** + * The type of a tool in the playground + */ +export type Tool = { + id: number; + definition: Partial; +}; + +/** + * A single instance of the playground that has + * - a template + * - tools + * - input (dataset or manual) + * - output (experiment or spans) + */ +export interface PlaygroundInstance { + /** + * An ID to uniquely identify the instance + */ + id: number; + template: PlaygroundTemplate; + tools: Tool[]; + toolChoice: ToolChoice | undefined; + input: PlaygroundInput; + model: ModelConfig; + output: ChatMessage[] | undefined | string; + activeRunId: number | null; + /** + * Whether or not the playground instance is actively running or not + **/ + isRunning: boolean; +} + +/** + * All actions for a playground instance must contain the id of the instance + */ +interface PlaygroundInstanceActionParams { + playgroundInstanceId: number; +} + +export interface AddMessageParams extends PlaygroundInstanceActionParams {} + +export interface PlaygroundProps { + /** + * How the LLM API should be invoked. Distinguishes between chat and text_completion. + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/ + * @default "chat" + */ + operationType: GenAIOperationType; + /** + * The input mode for the playground(s) + * NB: the input mode for all instances is synchronized + * @default "manual" + */ + inputMode: PlaygroundInputMode; + /** + * The input to all the playground instances + */ + input: PlaygroundInput; + /** + * The current playground instances(s) + * Defaults to a single instance until a second instance is added + */ + instances: Array; + + /** + * The current template language for all instances + * @default "mustache" + */ + templateLanguage: TemplateLanguage; +} + +export type InitialPlaygroundState = Partial; + +export interface PlaygroundState extends PlaygroundProps { + /** + * Setter for the invocation mode + * @param operationType + */ + setOperationType: (operationType: GenAIOperationType) => void; + /** + * Setter for the input mode. 
+ */ + setInputMode: (inputMode: PlaygroundInputMode) => void; + /** + * Add a comparison instance to the playground + */ + addInstance: () => void; + /** + * Delete a specific instance of the playground + * @param instanceId the instance to delete + */ + deleteInstance: (instanceId: number) => void; + /** + * Add a message to a playground instance + */ + addMessage: (params: AddMessageParams) => void; + /** + * Update an instance of the playground + */ + updateInstance: (params: { + instanceId: number; + patch: Partial; + }) => void; + /** + * Update an instance's model configuration + */ + updateModel: (params: { + instanceId: number; + model: Partial; + }) => void; + /** + * Run all the active playground Instances + */ + runPlaygroundInstances: () => void; + /** + * Run a specific playground instance + */ + runPlaygroundInstance: (instanceId: number) => void; + /** + * Mark a given playground instance as completed + */ + markPlaygroundInstanceComplete: (instanceId: number) => void; + /** + * Set the template language for all instances + */ + setTemplateLanguage: (templateLanguage: TemplateLanguage) => void; + /** + * Calculate the variables used across all instances + */ + calculateVariables: () => void; + + setVariableValue: (key: string, value: string) => void; +} + +/** + * Check if the input is manual + */ +export const isManualInput = (input: PlaygroundInput): input is ManualInput => { + return "variablesValueCache" in input && "variableKeys" in input; +}; + +/** + * Check if the input is a dataset + */ +export const isDatasetInput = ( + input: PlaygroundInput +): input is DatasetInput => { + return "datasetId" in input; +}; diff --git a/app/src/store/playgroundStore.tsx b/app/src/store/playgroundStore.tsx deleted file mode 100644 index 8e4bb96d16..0000000000 --- a/app/src/store/playgroundStore.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { create, StateCreator } from "zustand"; -import { devtools } from "zustand/middleware"; - -export type GenAIOperationType = "chat" | "text_completion"; -export interface PlaygroundProps { - /** - * How the LLM API should be invoked. Distinguishes between chat and text_completion. - * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/ - * @default "chat" - */ - operationType: GenAIOperationType; -} - -export interface PlaygroundState extends PlaygroundProps { - /** - * Setter for the invocation mode - * @param operationType - */ - setOperationType: (operationType: GenAIOperationType) => void; -} - -export const createPlaygroundStore = ( - initialProps?: Partial -) => { - const playgroundStore: StateCreator = (set) => ({ - operationType: "chat", - setOperationType: (operationType: GenAIOperationType) => - set({ operationType }), - ...initialProps, - }); - return create(devtools(playgroundStore)); -}; - -export type PlaygroundStore = ReturnType; diff --git a/app/src/typeUtils.ts b/app/src/typeUtils.ts index 936a698c57..813a83e902 100644 --- a/app/src/typeUtils.ts +++ b/app/src/typeUtils.ts @@ -1,3 +1,5 @@ +import { z } from "zod"; + /** * Utility function that uses the type system to check if a switch statement is exhaustive. * If the switch statement is not exhaustive, there will be a type error caught in typescript @@ -45,3 +47,25 @@ export function isObject(value: unknown): value is object { export type Mutable = { -readonly [P in keyof T]: T[P]; }; + +/** + * A zod type utility that ensures that the schema is written to correctly match (at least) what is included in the type. 
+ * Note it does not guard against extra fields in the schema not present in the type. + * @example + * ```typescript + * const chatMessageSchema = schemaForType()( + * z.object({ + * id: z.number(), + * role: chatMessageRolesSchema, + * content: z.string(), + * }) + * ); + * ``` + * Taken from the zod maintainer here: + * @see https://github.com/colinhacks/zod/issues/372#issuecomment-826380330 + */ +export const schemaForType = + () => + >(arg: S) => { + return arg; + }; diff --git a/app/src/utils/__tests__/spanUtils.test.ts b/app/src/utils/__tests__/spanUtils.test.ts new file mode 100644 index 0000000000..e879ddc575 --- /dev/null +++ b/app/src/utils/__tests__/spanUtils.test.ts @@ -0,0 +1,46 @@ +import { llmSpanToInvocation } from "../spanUtils"; + +const chatCompletionLLMSpanAttributes = { + // input: { + // mime_type: "application/json", + // value: + // '{"messages": [{"role": "system", "content": "You are an expert Q&A system that is trusted around the world.\\nAlways answer the query using the provided context information, and not prior knowledge.\\nSome rules to follow:\\n1. Never directly reference the given context in your answer.\\n2. Avoid statements like \'Based on the context, ...\' or \'The context information ...\' or anything along those lines.", "additional_kwargs": {}}, {"role": "user", "content": "Context information is below.\\n---------------------\\nsource: https://docs.arize.com/phoenix/tracing/concepts-tracing/what-are-traces\\ntitle: Traces\\n\\nNot more than 4-1/4 inches high, or more than 6 inches long, or greater than \\\\n0.016 inch thick.\\\\nd. Not more than 3.5 ounces (Charge flat-size prices for First-Class Mail \\\\ncard-type pieces over 3.5 ounces.)\\\\n\\\\n Instruction: Based on the above documents, provide a detailed answer for the user question below.\\\\n Answer \\\\\\"don\'t know\\\\\\" if not present in the document.\\\\n \\",\\n\\n\\n \\n\\"llm.input_messages.1.message.role\\"\\n:\\n \\n\\"user\\"\\n,\\n\\n\\n \\n\\"llm.input_messages.1.message.content\\"\\n:\\n \\n\\"Hello\\"\\n,\\n\\n\\n \\n\\"llm.model_name\\"\\n:\\n \\n\\"gpt-4-turbo-preview\\"\\n,\\n\\n\\n \\n\\"llm.invocation_parameters\\"\\n:\\n \\n\\"{\\\\\\"temperature\\\\\\": 0.1, \\\\\\"model\\\\\\": \\\\\\"gpt-4-turbo-preview\\\\\\"}\\"\\n,\\n\\n\\n \\n\\"output.value\\"\\n:\\n \\n\\"How are you?\\"\\n }\\n,\\n\\n\\n \\n\\"events\\"\\n:\\n []\\n,\\n\\n\\n \\n\\"links\\"\\n:\\n []\\n,\\n\\n\\n \\n\\"resource\\"\\n:\\n {\\n\\n\\n \\n\\"attributes\\"\\n:\\n {}\\n,\\n\\n\\n \\n\\"schema_url\\"\\n:\\n \\n\\"\\"\\n\\n\\n }\\n\\n\\n}\\nSpans can be nested, as is implied by the presence of a parent span ID: child spans represent sub-operations. This allows spans to more accurately capture the work done in an application.\\nTraces\\nA trace records the paths taken by requests (made by an application or end-user) as they propagate through multiple steps.\\nWithout tracing, it is challenging to pinpoint the cause of performance problems in a system.\\nIt improves the visibility of our application or system\\u2019s health and lets us debug behavior that is difficult to reproduce locally. Tracing is essential for LLM applications, which commonly have nondeterministic problems or are too complicated to reproduce locally.\\nTracing makes debugging and understanding LLM applications less daunting by breaking down what happens within a request as it flows through a system.\\nA trace is made of one or more spans. The first span represents the root span. 
Each root span represents a request from start to finish. The spans underneath the parent provide a more in-depth context of what occurs during a request (or what steps make up a request).\\nProjects\\nA \\nproject\\n is a collection of traces. You can think of a project as a container for all the traces that are related to a single application or service. You can have multiple projects, and each project can have multiple traces. Projects can be useful for various use-cases such as separating out environments, logging traces for evaluation runs, etc. To learn more about how to setup projects, see the \\nhow-to guide.\\nSpan Kind\\nWhen a span is created, it is created as one of the following: Chain, Retriever, Reranker, LLM, Embedding, Agent, or Tool. \\nCHAIN\\nA Chain is a starting point or a link between different LLM application steps. For example, a Chain span could be used to represent the beginning of a request to an LLM application or the glue code that passes context from a retriever to and LLM call.\\nRETRIEVER\\nA Retriever is a span that represents a data retrieval step. For example, a Retriever span could be used to represent a call to a vector store or a database.\\nRERANKER\\nA Reranker is a span that represents the reranking of a set of input documents. For example, a cross-encoder may be used to compute the input documents\' relevance scores with respect to a user query, and the top K documents with the highest scores are then returned by the Reranker.\\nLLM\\nAn LLM is a span that represents a call to an LLM. For example, an LLM span could be used to represent a call to OpenAI or Llama.\\nEMBEDDING\\nAn Embedding is a span that represents a call to an LLM for an embedding. For example, an Embedding span could be used to represent a call OpenAI to get an ada-2 embedding for retrieval.\\nTOOL\\nA Tool is a span that represents a call to an external tool such as a calculator or a weather API.\\nAGENT\\nA span that encompasses calls to LLMs and Tools. An agent describes a reasoning block that acts on tools using the guidance of an LLM.\\n\\nSpan Attributes\\nAttributes are key-value pairs that contain metadata that you can use to annotate a span to carry information about the operation it is tracking.\\nFor example, if a span invokes an LLM, you can capture the model name, the invocation parameters, the token count, and so on.\\nAttributes have the following rules:\\nKeys must be non-null string values\\nValues must be a non-null string, boolean, floating point value, integer, or an array of these values Additionally, there are Semantic Attributes, which are known naming conventions for metadata that is typically present in common operations. It\'s helpful to use semantic attribute naming wherever possible so that common kinds of metadata are standardized across systems. See \\nsemantic conventions\\n for more information.\\n\\nsource: https://docs.arize.com/phoenix/tracing/llm-traces\\ntitle: Overview: Tracing\\n\\nOverview: Tracing\\nTracing the execution of LLM applications using Telemetry\\nLLM tracing records the paths taken by requests as they propagate through multiple steps or components of an LLM application. For example, when a user interacts with an LLM application, tracing can capture the sequence of operations, such as document retrieval, embedding generation, language model invocation, and response generation to provide a detailed timeline of the request\'s execution.\\nTracing is a helpful tool for understanding how your LLM application works. 
Phoenix offers comprehensive tracing capabilities that are not tied to any specific LLM vendor or framework. Phoenix accepts traces over the OpenTelemetry protocol (OTLP) and supports first-class instrumentation for a variety of frameworks ( \\nLlamaIndex\\n, \\nLangChain\\n,\\n DSPy\\n), SDKs (\\nOpenAI\\n, \\nBedrock\\n, \\nMistral\\n, \\nVertex\\n), and Languages. (Python, Javascript, etc.)\\nUsing Phoenix\'s tracing capabilities can provide important insights into the inner workings of your LLM application. By analyzing the collected trace data, you can identify and address various performance and operational issues and improve the overall reliability and efficiency of your system.\\nApplication Latency\\n: Identify and address slow invocations of LLMs, Retrievers, and other components within your application, enabling you to optimize performance and responsiveness.\\nToken Usage\\n: Gain a detailed breakdown of token usage for your LLM calls, allowing you to identify and optimize the most expensive LLM invocations.\\nRuntime Exceptions\\n: Capture and inspect critical runtime exceptions, such as rate-limiting events, that can help you proactively address and mitigate potential issues.\\nRetrieved Documents\\n: Inspect the documents retrieved during a Retriever call, including the score and order in which they were returned to provide insight into the retrieval process.\\nEmbeddings\\n: Examine the embedding text used for retrieval and the underlying embedding model to allow you to validate and refine your embedding strategies.\\nLLM Parameters\\n: Inspect the parameters used when calling an LLM, such as temperature and system prompts, to ensure optimal configuration and debugging.\\nPrompt Templates\\n: Understand the prompt templates used during the prompting step and the variables that were applied, allowing you to fine-tune and improve your prompting strategies.\\nTool Descriptions\\n: View the descriptions and function signatures of the tools your LLM has been given access to in order to better understand and control your LLM\\u2019s capabilities.\\nLLM Function Calls\\n: For LLMs with function call capabilities (e.g., OpenAI), you can inspect the function selection and function messages in the input to the LLM, further improving your ability to debug and optimize your application.\\nBy using tracing in Phoenix, you can gain increased visibility into your LLM application, empowering you to identify and address performance bottlenecks, optimize resource utilization, and ensure the overall reliability and effectiveness of your system.\\nView the inner workings for your LLM Application\\nTo get started, check out the \\nQuickstart guide\\nAfter that, read through the \\nConcepts Section\\n to get and understanding of the different components.\\nIf you want to learn how to accomplish a particular task, check out the \\nHow-To Guides.\\n\\n\\nPrevious\\nFAQs: Deployment\\nNext\\nQuickstart: Tracing\\nLast updated \\n6 hours ago\\n---------------------\\nGiven the context information and not prior knowledge, answer the query.\\nQuery: Can I use gRPC for trace collection?\\nAnswer: ", "additional_kwargs": {}}]}', + // }, + llm: { + output_messages: [ + { + message: { + content: "This is an AI Answer", + role: "assistant", + }, + }, + ], + model_name: "gpt-3.5-turbo", + token_count: { completion: 9.0, prompt: 1881.0, total: 1890.0 }, + input_messages: [ + { + message: { + content: "You are a chatbot", + role: "system", + }, + }, + { + message: { + content: "Anser me the following 
question. Are you sentient?", + role: "user", + }, + }, + ], + invocation_parameters: + '{"context_window": 16384, "num_output": -1, "is_chat_model": true, "is_function_calling_model": true, "model_name": "gpt-3.5-turbo"}', + }, + openinference: { span: { kind: "LLM" } }, + // output: { value: "assistant: You can use gRPC for trace collection." }, +}; + +describe("spanUtils", () => { + it("should convert a chat completion llm span to an invocation object type", () => { + const result = llmSpanToInvocation(chatCompletionLLMSpanAttributes); + expect(result).toEqual({}); + }); +}); diff --git a/app/src/utils/generativeUtils.ts b/app/src/utils/generativeUtils.ts new file mode 100644 index 0000000000..ace1efa69b --- /dev/null +++ b/app/src/utils/generativeUtils.ts @@ -0,0 +1,10 @@ +/** + * A TypeGuard to ensure that a string is a valid ModelProvider + */ +export function isModelProvider(provider: string): provider is ModelProvider { + return ( + provider === "OPENAI" || + provider === "AZURE_OPENAI" || + provider === "ANTHROPIC" + ); +} diff --git a/app/src/utils/jsonUtils.ts b/app/src/utils/jsonUtils.ts index 395388f3e2..792f76bc1d 100644 --- a/app/src/utils/jsonUtils.ts +++ b/app/src/utils/jsonUtils.ts @@ -32,3 +32,11 @@ export function isJSONString({ export function isJSONObjectString(str: string) { return isJSONString({ str, excludeArray: true, excludePrimitives: true }); } + +export function safelyParseJSON(str: string) { + try { + return { json: JSON.parse(str) }; + } catch (e) { + return { json: null, parseError: e }; + } +} diff --git a/app/src/utils/spanUtils.ts b/app/src/utils/spanUtils.ts new file mode 100644 index 0000000000..f81b76be06 --- /dev/null +++ b/app/src/utils/spanUtils.ts @@ -0,0 +1,3 @@ +export function llmSpanToInvocation(_span: unknown): unknown { + return {}; +} diff --git a/app/vite.config.mts b/app/vite.config.mts index bb1631018d..b73607cf91 100644 --- a/app/vite.config.mts +++ b/app/vite.config.mts @@ -1,3 +1,8 @@ +// TODO(apowell): we need to follow the vite tsconfig convention: tsconfig.json, tsconfig.node.json, tsconfig.app.json +// tsconfig.json references tsconfig.app.json and tsconfig.node.json, each of which include react src and node src respectively +// this current file being a part of tsconfig.node.json scope in particular +// @ts-expect-error we need a separate tsconfig for vite +import { lezer } from "@lezer/generator/rollup"; import react from "@vitejs/plugin-react"; import { resolve } from "path"; import { visualizer } from "rollup-plugin-visualizer"; @@ -6,7 +11,7 @@ import { defineConfig } from "vite"; import relay from "vite-plugin-relay"; export default defineConfig(({ command }) => { - const plugins = [react(), relay]; + const plugins = [react(), relay, lezer()]; // Development uses the serve command if (command === "serve") { plugins.push(visualizer()); @@ -30,6 +35,7 @@ export default defineConfig(({ command }) => { include: ["../__tests__/*.test.ts", "**/__tests__/*.test.ts"], exclude: ["../node_modules/**"], environment: "jsdom", + setupFiles: ["./vitest.setup.ts"], globals: true, }, build: { diff --git a/app/vitest.setup.ts b/app/vitest.setup.ts new file mode 100644 index 0000000000..48dd31cb61 --- /dev/null +++ b/app/vitest.setup.ts @@ -0,0 +1,9 @@ +import "vitest-canvas-mock"; + +Object.defineProperty(window, "Config", { + value: { + authenticationEnabled: true, + basename: "/", + platformVersion: "1.0.0", + }, +}); diff --git a/cspell.json b/cspell.json index f128d64789..0ac8a3cc89 100644 --- a/cspell.json +++ b/cspell.json @@ -19,6 
+19,7 @@ "dunder", "Evals", "fastapi", + "genai", "gitbook", "graphiql", "HDBSCAN", diff --git a/internal_docs/specs/playground.md b/internal_docs/specs/playground.md new file mode 100644 index 0000000000..a0f9c54c61 --- /dev/null +++ b/internal_docs/specs/playground.md @@ -0,0 +1,128 @@ +# Playground + +Authors: @mikeldking + +As a user of Phoenix I don't want to have to go back to my IDE to iterate on a prompt. I don’t have to worry about programming languages or dependencies. I want to be able to use the data stored in Phoenix (spans, datasets) and run them through prompt(s) and prompt template(s). + +## Terminology + +- **operation ** Refers to how the LLM is invoked (chat, text_completion). We will consider chat to be higher priority (https://opentelemetry.io/docs/specs/semconv/attributes-registry/gen-ai/) +- playground input source - refers to whether the input to the template is "manual" e.g. modifiable by the user or "dataset". + +## Use-cases + +A user may want to use the playground to: + +- Test a prompt template +- LLM Replay: replay a template or prompt change on an LLM Span - to live debug an improvement +- Run a template change on a dataset (sweep over a set of inputs) - e.g. a prompt change experiment +- A/B Testing of models and templates +- evaluation template creation: run an evaluation template on a single chosen production span or Dataset - Workflow is testing your Evals and be able to save as experiment +- Synthetic data Generation - Use to generate synthetic data, add columns to current rows of data in a dataset, to help create test data + +### Prompt Template + +As an AI engineer, I may want to use a prompt playground to explore synthesis, cost, latency, etc. under different scenarios. This means that the playground needs to be more flexible than a vendor’s playground as it needs “unify” the API across vendors. + +As a user, I want to be able to "run" a template and see the results as the tokens arrive. But I also want this data to be recorded (as a span) so that I can use it for datasets and annotations (e.g. stash the ones that I like). + +### LLM Replay + +As an AI engineer that is already using Phoenix tracing, I want the ability to take an LLM span and replay the synthesis to see if a tweaked response will make any difference in the output. This means that all the necessary fields for synthesis must be able to be translated from semantic attribute values to the playground. + +- llm vendor +- llm name +- operation type (chat, text_completion) +- invocation parameters +- messages and roles +- tools +- output schema + +The above values will have to be translated from the span to a corresponding values in the playground for invocation. + +Below is a typical attribute payload for an chat llm span: + +```typescript +{ + + llm: { + output_messages: [ + { + message: { + content: "This is an AI Answer", + role: "assistant", + }, + }, + ], + model_name: "gpt-3.5-turbo", + token_count: { completion: 9.0, prompt: 1881.0, total: 1890.0 }, + input_messages: [ + { + message: { + content: "You are a chatbot", + role: "system", + }, + }, + { + message: { + content: "Anser me the following question. 
Are you sentient?", + role: "user", + }, + }, + ], + invocation_parameters: + '{"context_window": 16384, "num_output": -1, "is_chat_model": true, "is_function_calling_model": true, "model_name": "gpt-3.5-turbo"}', + }, + openinference: { span: { kind: "LLM" } }, +}; +``` + +For chat the following mapping will be used: + +- llm.input_messages -> invocation.messages +- llm.invocation_parameters -> invocation.parameters +- llm.model_name -> invocation.model (e.g. vendor + model_name ) +- llm.tools -> invocation.tools +- llm.tool_selection (missing) -> playground.tool_selection (default to auto) +- (TBD) -> invocation.output_schema (output schema for JSON mode) + +### A/B Testing + +As an AI engineer I want the ability to create “multiple” playgrounds to answer certain types of questions: + +- Does prompt A produce better responses +- Does model X produce better responses + +In some cases I want to have things in A / B sync’d and sometimes I don’t. For that reason the UI should allow the user to: + +- Sync models - The user is adjusting the template or invocation parameters +- Sync templates - The user is adjusting the model +- Sync inputs - The user is testing different inputs + +### Evaluation Template + +As an AI engineer I want to ask questions about a previously recorded synthesis (e.g. LLM span) + +## Technical Features + +The following technical features are required for the playground: + +**Frontend** + +- Credential storage in local storage +- Span attribute to playground state translation + +**Backend** + +- LLM invocation interface (GraphQL or other) +- Span recording during synthesis +- Playground "session" tracking +- Playground dataset invocation tracking +- Streaming of synthesis results + +## Tracing Needs + +In order for spans -> playground invocation to be seamless, we must capture all necessary invocation parameters. This includes: + +- LLM/genai system - e.g. openai / anthropic etc. 
+- Output Schema - LLMs can adhere to OpenAPI schemas diff --git a/pyproject.toml b/pyproject.toml index 246759ff9c..1bea64cbe5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,7 @@ dependencies = [ "fastapi", "pydantic>=1.0,!=2.0.*,<3", # exclude 2.0.* since it does not support the `json_encoders` configuration setting "authlib", + "websockets", ] dynamic = ["version"] diff --git a/src/phoenix/db/migrations/versions/10460e46d750_datasets.py b/src/phoenix/db/migrations/versions/10460e46d750_datasets.py index 3a4aeec79e..8d4eea00c4 100644 --- a/src/phoenix/db/migrations/versions/10460e46d750_datasets.py +++ b/src/phoenix/db/migrations/versions/10460e46d750_datasets.py @@ -20,7 +20,7 @@ class JSONB(JSON): __visit_name__ = "JSONB" -@compiles(JSONB, "sqlite") # type: ignore +@compiles(JSONB, "sqlite") def _(*args: Any, **kwargs: Any) -> str: # See https://docs.sqlalchemy.org/en/20/core/custom_types.html return "JSONB" diff --git a/src/phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py b/src/phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py index 141a378335..9b5a36c553 100644 --- a/src/phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py +++ b/src/phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py @@ -32,7 +32,7 @@ class JSONB(JSON): __visit_name__ = "JSONB" -@compiles(JSONB, "sqlite") # type: ignore +@compiles(JSONB, "sqlite") def _(*args: Any, **kwargs: Any) -> str: # See https://docs.sqlalchemy.org/en/20/core/custom_types.html return "JSONB" diff --git a/src/phoenix/db/migrations/versions/cf03bd6bae1d_init.py b/src/phoenix/db/migrations/versions/cf03bd6bae1d_init.py index e838f04f04..0baa6b90d5 100644 --- a/src/phoenix/db/migrations/versions/cf03bd6bae1d_init.py +++ b/src/phoenix/db/migrations/versions/cf03bd6bae1d_init.py @@ -20,7 +20,7 @@ class JSONB(JSON): __visit_name__ = "JSONB" -@compiles(JSONB, "sqlite") # type: ignore +@compiles(JSONB, "sqlite") def _(*args: Any, **kwargs: Any) -> str: # See https://docs.sqlalchemy.org/en/20/core/custom_types.html return "JSONB" diff --git a/src/phoenix/db/models.py b/src/phoenix/db/models.py index ad3070f6db..2adef3b19f 100644 --- a/src/phoenix/db/models.py +++ b/src/phoenix/db/models.py @@ -50,7 +50,7 @@ class JSONB(JSON): __visit_name__ = "JSONB" -@compiles(JSONB, "sqlite") # type: ignore +@compiles(JSONB, "sqlite") def _(*args: Any, **kwargs: Any) -> str: # See https://docs.sqlalchemy.org/en/20/core/custom_types.html return "JSONB" @@ -271,7 +271,7 @@ class LatencyMs(expression.FunctionElement[float]): name = "latency_ms" -@compiles(LatencyMs) # type: ignore +@compiles(LatencyMs) def _(element: Any, compiler: Any, **kw: Any) -> Any: # See https://docs.sqlalchemy.org/en/20/core/compiler.html start_time, end_time = list(element.clauses) @@ -287,7 +287,7 @@ def _(element: Any, compiler: Any, **kw: Any) -> Any: ) -@compiles(LatencyMs, "sqlite") # type: ignore +@compiles(LatencyMs, "sqlite") def _(element: Any, compiler: Any, **kw: Any) -> Any: # See https://docs.sqlalchemy.org/en/20/core/compiler.html start_time, end_time = list(element.clauses) @@ -308,21 +308,21 @@ class TextContains(expression.FunctionElement[str]): name = "text_contains" -@compiles(TextContains) # type: ignore +@compiles(TextContains) def _(element: Any, compiler: Any, **kw: Any) -> Any: # See https://docs.sqlalchemy.org/en/20/core/compiler.html string, substring = list(element.clauses) return compiler.process(string.contains(substring), **kw) 
-@compiles(TextContains, "postgresql") # type: ignore +@compiles(TextContains, "postgresql") def _(element: Any, compiler: Any, **kw: Any) -> Any: # See https://docs.sqlalchemy.org/en/20/core/compiler.html string, substring = list(element.clauses) return compiler.process(func.strpos(string, substring) > 0, **kw) -@compiles(TextContains, "sqlite") # type: ignore +@compiles(TextContains, "sqlite") def _(element: Any, compiler: Any, **kw: Any) -> Any: # See https://docs.sqlalchemy.org/en/20/core/compiler.html string, substring = list(element.clauses) diff --git a/src/phoenix/server/api/input_types/ChatCompletionMessageInput.py b/src/phoenix/server/api/input_types/ChatCompletionMessageInput.py new file mode 100644 index 0000000000..5c2237dae6 --- /dev/null +++ b/src/phoenix/server/api/input_types/ChatCompletionMessageInput.py @@ -0,0 +1,12 @@ +import strawberry +from strawberry.scalars import JSON + +from phoenix.server.api.types.ChatCompletionMessageRole import ChatCompletionMessageRole + + +@strawberry.input +class ChatCompletionMessageInput: + role: ChatCompletionMessageRole + content: JSON = strawberry.field( + description="The content of the message as JSON to support text and tools", + ) diff --git a/src/phoenix/server/api/input_types/InvocationParameters.py b/src/phoenix/server/api/input_types/InvocationParameters.py new file mode 100644 index 0000000000..73adda706f --- /dev/null +++ b/src/phoenix/server/api/input_types/InvocationParameters.py @@ -0,0 +1,20 @@ +from typing import List, Optional + +import strawberry +from strawberry import UNSET +from strawberry.scalars import JSON + + +@strawberry.input +class InvocationParameters: + """ + Invocation parameters interface shared between different providers. + """ + + temperature: Optional[float] = UNSET + max_completion_tokens: Optional[int] = UNSET + max_tokens: Optional[int] = UNSET + top_p: Optional[float] = UNSET + stop: Optional[List[str]] = UNSET + seed: Optional[int] = UNSET + tool_choice: Optional[JSON] = UNSET diff --git a/src/phoenix/server/api/queries.py b/src/phoenix/server/api/queries.py index 0c52856db8..4c8319555e 100644 --- a/src/phoenix/server/api/queries.py +++ b/src/phoenix/server/api/queries.py @@ -11,7 +11,7 @@ from strawberry import ID, UNSET from strawberry.relay import Connection, GlobalID, Node from strawberry.types import Info -from typing_extensions import Annotated, TypeAlias +from typing_extensions import Annotated, TypeAlias, assert_never from phoenix.db import enums, models from phoenix.db.models import ( @@ -58,6 +58,10 @@ from phoenix.server.api.types.ExperimentComparison import ExperimentComparison, RunComparisonItem from phoenix.server.api.types.ExperimentRun import ExperimentRun, to_gql_experiment_run from phoenix.server.api.types.Functionality import Functionality +from phoenix.server.api.types.GenerativeProvider import ( + GenerativeProvider, + GenerativeProviderKey, +) from phoenix.server.api.types.InferencesRole import AncillaryInferencesRole, InferencesRole from phoenix.server.api.types.Model import Model from phoenix.server.api.types.node import from_global_id, from_global_id_with_expected_type @@ -76,8 +80,89 @@ from phoenix.server.api.types.UserRole import UserRole +@strawberry.input +class ModelNamesInput: + provider_key: GenerativeProviderKey + + @strawberry.type class Query: + @strawberry.field + async def model_providers(self) -> List[GenerativeProvider]: + return [ + GenerativeProvider( + name="OpenAI", + key=GenerativeProviderKey.OPENAI, + ), + GenerativeProvider( + name="Azure OpenAI", + 
key=GenerativeProviderKey.AZURE_OPENAI, + ), + GenerativeProvider( + name="Anthropic", + key=GenerativeProviderKey.ANTHROPIC, + ), + ] + + @strawberry.field + async def model_names(self, input: ModelNamesInput) -> List[str]: + if (provider_key := input.provider_key) == GenerativeProviderKey.OPENAI: + return [ + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", + "gpt-4-1106-preview", + "gpt-4", + "gpt-4-0613", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-instruct", + ] + if provider_key == GenerativeProviderKey.AZURE_OPENAI: + return [ + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "chatgpt-4o-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", + "gpt-4-1106-preview", + "gpt-4", + "gpt-4-0613", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-instruct", + ] + if provider_key == GenerativeProviderKey.ANTHROPIC: + return [ + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + ] + assert_never(provider_key) + @strawberry.field(permission_classes=[IsAdmin]) # type: ignore async def users( self, diff --git a/src/phoenix/server/api/schema.py b/src/phoenix/server/api/schema.py index 0a5ac8b7de..0f153ba7cd 100644 --- a/src/phoenix/server/api/schema.py +++ b/src/phoenix/server/api/schema.py @@ -3,6 +3,7 @@ from phoenix.server.api.exceptions import get_mask_errors_extension from phoenix.server.api.mutations import Mutation from phoenix.server.api.queries import Query +from phoenix.server.api.subscriptions import Subscription # This is the schema for generating `schema.graphql`. 
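# [editor's note: illustrative sketch, not part of the patch] Exercising the
# new modelProviders/modelNames query fields end to end. Assumes the strawberry
# schema built below is exported as `schema` from phoenix.server.api.schema and
# that field/input names follow strawberry's default camelCasing of the Python
# definitions above.
import asyncio

from phoenix.server.api.schema import schema

QUERY = """
  query ($input: ModelNamesInput!) {
    modelProviders { name key }
    modelNames(input: $input)
  }
"""


async def main() -> None:
    result = await schema.execute(QUERY, variable_values={"input": {"providerKey": "ANTHROPIC"}})
    assert result.errors is None
    print(result.data["modelNames"])  # the four claude-3* names hard-coded above


asyncio.run(main())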
# See https://strawberry.rocks/docs/guides/schema-export @@ -12,4 +13,5 @@ query=Query, mutation=Mutation, extensions=[get_mask_errors_extension()], + subscription=Subscription, ) diff --git a/src/phoenix/server/api/subscriptions.py b/src/phoenix/server/api/subscriptions.py new file mode 100644 index 0000000000..638eb1943a --- /dev/null +++ b/src/phoenix/server/api/subscriptions.py @@ -0,0 +1,357 @@ +import json +from collections import defaultdict +from dataclasses import fields +from datetime import datetime +from itertools import chain +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + AsyncIterator, + DefaultDict, + Dict, + Iterator, + List, + Optional, + Tuple, + Union, +) + +import strawberry +from openinference.instrumentation import safe_json_dumps +from openinference.semconv.trace import ( + MessageAttributes, + OpenInferenceMimeTypeValues, + OpenInferenceSpanKindValues, + SpanAttributes, + ToolAttributes, + ToolCallAttributes, +) +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.trace import StatusCode +from sqlalchemy import insert, select +from strawberry import UNSET +from strawberry.scalars import JSON as JSONScalarType +from strawberry.types import Info +from typing_extensions import TypeAlias, assert_never + +from phoenix.db import models +from phoenix.server.api.context import Context +from phoenix.server.api.input_types.ChatCompletionMessageInput import ChatCompletionMessageInput +from phoenix.server.api.input_types.InvocationParameters import InvocationParameters +from phoenix.server.api.types.ChatCompletionMessageRole import ChatCompletionMessageRole +from phoenix.server.api.types.GenerativeProvider import GenerativeProviderKey +from phoenix.server.dml_event import SpanInsertEvent +from phoenix.trace.attributes import unflatten +from phoenix.utilities.json import jsonify + +if TYPE_CHECKING: + from openai.types.chat import ( + ChatCompletionMessageParam, + ) + +PLAYGROUND_PROJECT_NAME = "playground" + +ToolCallIndex: TypeAlias = int + + +@strawberry.type +class TextChunk: + content: str + + +@strawberry.type +class FunctionCallChunk: + name: str + arguments: str + + +@strawberry.type +class ToolCallChunk: + id: str + function: FunctionCallChunk + + +ChatCompletionChunk: TypeAlias = Annotated[ + Union[TextChunk, ToolCallChunk], strawberry.union("ChatCompletionChunk") +] + + +@strawberry.input +class GenerativeModelInput: + provider_key: GenerativeProviderKey + name: str + + +@strawberry.input +class ChatCompletionInput: + messages: List[ChatCompletionMessageInput] + model: GenerativeModelInput + invocation_parameters: InvocationParameters + tools: Optional[List[JSONScalarType]] = UNSET + api_key: Optional[str] = strawberry.field(default=None) + + +def to_openai_chat_completion_param( + message: ChatCompletionMessageInput, +) -> "ChatCompletionMessageParam": + from openai.types.chat import ( + ChatCompletionAssistantMessageParam, + ChatCompletionSystemMessageParam, + ChatCompletionUserMessageParam, + ) + + if message.role is ChatCompletionMessageRole.USER: + return ChatCompletionUserMessageParam( + { + "content": message.content, + "role": "user", + } + ) + if message.role is ChatCompletionMessageRole.SYSTEM: + return ChatCompletionSystemMessageParam( + { + "content": message.content, + "role": "system", + } + ) + if message.role is ChatCompletionMessageRole.AI: + return 
ChatCompletionAssistantMessageParam( + { + "content": message.content, + "role": "assistant", + } + ) + if message.role is ChatCompletionMessageRole.TOOL: + raise NotImplementedError + assert_never(message.role) + + +@strawberry.type +class Subscription: + @strawberry.subscription + async def chat_completion( + self, info: Info[Context, None], input: ChatCompletionInput + ) -> AsyncIterator[ChatCompletionChunk]: + from openai import NOT_GIVEN, AsyncOpenAI + + client = AsyncOpenAI(api_key=input.api_key) + invocation_parameters = jsonify(input.invocation_parameters) + + in_memory_span_exporter = InMemorySpanExporter() + tracer_provider = TracerProvider() + tracer_provider.add_span_processor( + span_processor=SimpleSpanProcessor(span_exporter=in_memory_span_exporter) + ) + tracer = tracer_provider.get_tracer(__name__) + span_name = "ChatCompletion" + with tracer.start_span( + span_name, + attributes=dict( + chain( + _llm_span_kind(), + _llm_model_name(input.model.name), + _llm_tools(input.tools or []), + _llm_input_messages(input.messages), + _llm_invocation_parameters(invocation_parameters), + _input_value_and_mime_type(input), + ) + ), + ) as span: + response_chunks = [] + text_chunks: List[TextChunk] = [] + tool_call_chunks: DefaultDict[ToolCallIndex, List[ToolCallChunk]] = defaultdict(list) + role: Optional[str] = None + async for chunk in await client.chat.completions.create( + messages=(to_openai_chat_completion_param(message) for message in input.messages), + model=input.model.name, + stream=True, + tools=input.tools or NOT_GIVEN, + **invocation_parameters, + ): + response_chunks.append(chunk) + choice = chunk.choices[0] + delta = choice.delta + if role is None: + role = delta.role + if choice.finish_reason is None: + if isinstance(chunk_content := delta.content, str): + text_chunk = TextChunk(content=chunk_content) + yield text_chunk + text_chunks.append(text_chunk) + if (tool_calls := delta.tool_calls) is not None: + for tool_call_index, tool_call in enumerate(tool_calls): + if (function := tool_call.function) is not None: + if (tool_call_id := tool_call.id) is None: + first_tool_call_chunk = tool_call_chunks[tool_call_index][0] + tool_call_id = first_tool_call_chunk.id + tool_call_chunk = ToolCallChunk( + id=tool_call_id, + function=FunctionCallChunk( + name=function.name or "", + arguments=function.arguments or "", + ), + ) + yield tool_call_chunk + tool_call_chunks[tool_call_index].append(tool_call_chunk) + span.set_status(StatusCode.OK) + assert role is not None + span.set_attributes( + dict( + chain( + _output_value_and_mime_type(response_chunks), + _llm_output_messages(text_chunks, tool_call_chunks), + ) + ) + ) + assert len(spans := in_memory_span_exporter.get_finished_spans()) == 1 + finished_span = spans[0] + assert finished_span.start_time is not None + assert finished_span.end_time is not None + assert (attributes := finished_span.attributes) is not None + start_time = _datetime(epoch_nanoseconds=finished_span.start_time) + end_time = _datetime(epoch_nanoseconds=finished_span.end_time) + trace_id = _hex(finished_span.context.trace_id) + span_id = _hex(finished_span.context.span_id) + status = finished_span.status + async with info.context.db() as session: + if ( + playground_project_id := await session.scalar( + select(models.Project.id).where(models.Project.name == PLAYGROUND_PROJECT_NAME) + ) + ) is None: + playground_project_id = await session.scalar( + insert(models.Project) + .returning(models.Project.id) + .values( + name=PLAYGROUND_PROJECT_NAME, + 
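# [editor's note: illustrative sketch, not part of the patch] Why the
# subscription above groups ToolCallChunks by index: in OpenAI streaming
# responses only the first delta for a given tool call carries its id (and
# usually the function name); later deltas arrive with id=None plus an argument
# fragment, so the code falls back to the first chunk's id and the fragments
# are concatenated when the output-message attributes are built. A stand-alone
# illustration of that bookkeeping with fake deltas:
from collections import defaultdict
from typing import DefaultDict, List, Optional, Tuple

streamed: List[Tuple[int, Optional[str], str]] = [
    # (tool_call_index, tool_call_id, argument_fragment) as they might arrive
    (0, "call_abc123", '{"cit'),
    (0, None, 'y": "San'),
    (0, None, ' Francisco"}'),
]

chunks: DefaultDict[int, List[Tuple[str, str]]] = defaultdict(list)
for index, tool_call_id, fragment in streamed:
    if tool_call_id is None:
        tool_call_id = chunks[index][0][0]  # reuse the id from the first chunk
    chunks[index].append((tool_call_id, fragment))

assert "".join(fragment for _, fragment in chunks[0]) == '{"city": "San Francisco"}'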
description="Traces from prompt playground", + ) + ) + trace_rowid = await session.scalar( + insert(models.Trace) + .returning(models.Trace.id) + .values( + project_rowid=playground_project_id, + trace_id=trace_id, + start_time=start_time, + end_time=end_time, + ) + ) + await session.execute( + insert(models.Span).values( + trace_rowid=trace_rowid, + span_id=span_id, + parent_id=None, + name=span_name, + span_kind=LLM, + start_time=start_time, + end_time=end_time, + attributes=unflatten(attributes.items()), + events=finished_span.events, + status_code=status.status_code.name, + status_message=status.description or "", + cumulative_error_count=int(not status.is_ok), + cumulative_llm_token_count_prompt=0, + cumulative_llm_token_count_completion=0, + llm_token_count_prompt=0, + llm_token_count_completion=0, + ) + ) + info.context.event_queue.put(SpanInsertEvent(ids=(playground_project_id,))) + + +def _llm_span_kind() -> Iterator[Tuple[str, Any]]: + yield OPENINFERENCE_SPAN_KIND, LLM + + +def _llm_model_name(model_name: str) -> Iterator[Tuple[str, Any]]: + yield LLM_MODEL_NAME, model_name + + +def _llm_invocation_parameters(invocation_parameters: Dict[str, Any]) -> Iterator[Tuple[str, Any]]: + yield LLM_INVOCATION_PARAMETERS, safe_json_dumps(invocation_parameters) + + +def _llm_tools(tools: List[JSONScalarType]) -> Iterator[Tuple[str, Any]]: + for tool_index, tool in enumerate(tools): + yield f"{LLM_TOOLS}.{tool_index}.{TOOL_JSON_SCHEMA}", json.dumps(tool) + + +def _input_value_and_mime_type(input: ChatCompletionInput) -> Iterator[Tuple[str, Any]]: + assert any(field.name == (api_key := "api_key") for field in fields(ChatCompletionInput)) + yield INPUT_MIME_TYPE, JSON + yield INPUT_VALUE, safe_json_dumps({k: v for k, v in jsonify(input).items() if k != api_key}) + + +def _output_value_and_mime_type(output: Any) -> Iterator[Tuple[str, Any]]: + yield OUTPUT_MIME_TYPE, JSON + yield OUTPUT_VALUE, safe_json_dumps(jsonify(output)) + + +def _llm_input_messages(messages: List[ChatCompletionMessageInput]) -> Iterator[Tuple[str, Any]]: + for i, message in enumerate(messages): + yield f"{LLM_INPUT_MESSAGES}.{i}.{MESSAGE_ROLE}", message.role.value.lower() + yield f"{LLM_INPUT_MESSAGES}.{i}.{MESSAGE_CONTENT}", message.content + + +def _llm_output_messages( + text_chunks: List[TextChunk], + tool_call_chunks: DefaultDict[ToolCallIndex, List[ToolCallChunk]], +) -> Iterator[Tuple[str, Any]]: + yield f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_ROLE}", "assistant" + if content := "".join(chunk.content for chunk in text_chunks): + yield f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_CONTENT}", content + for tool_call_index, tool_call_chunks_ in tool_call_chunks.items(): + if tool_call_chunks_ and (name := tool_call_chunks_[0].function.name): + yield ( + f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_TOOL_CALLS}.{tool_call_index}.{TOOL_CALL_FUNCTION_NAME}", + name, + ) + if arguments := "".join(chunk.function.arguments for chunk in tool_call_chunks_): + yield ( + f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_TOOL_CALLS}.{tool_call_index}.{TOOL_CALL_FUNCTION_ARGUMENTS_JSON}", + arguments, + ) + + +def _hex(number: int) -> str: + """ + Converts an integer to a hexadecimal string. + """ + return hex(number)[2:] + + +def _datetime(*, epoch_nanoseconds: float) -> datetime: + """ + Converts a Unix epoch timestamp in nanoseconds to a datetime. 
+ """ + epoch_seconds = epoch_nanoseconds / 1e9 + return datetime.fromtimestamp(epoch_seconds) + + +JSON = OpenInferenceMimeTypeValues.JSON.value + +LLM = OpenInferenceSpanKindValues.LLM.value + +OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND +INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE +INPUT_VALUE = SpanAttributes.INPUT_VALUE +OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE +OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE +LLM_INPUT_MESSAGES = SpanAttributes.LLM_INPUT_MESSAGES +LLM_OUTPUT_MESSAGES = SpanAttributes.LLM_OUTPUT_MESSAGES +LLM_MODEL_NAME = SpanAttributes.LLM_MODEL_NAME +LLM_INVOCATION_PARAMETERS = SpanAttributes.LLM_INVOCATION_PARAMETERS +LLM_TOOLS = SpanAttributes.LLM_TOOLS + +MESSAGE_CONTENT = MessageAttributes.MESSAGE_CONTENT +MESSAGE_ROLE = MessageAttributes.MESSAGE_ROLE +MESSAGE_TOOL_CALLS = MessageAttributes.MESSAGE_TOOL_CALLS + +TOOL_CALL_FUNCTION_NAME = ToolCallAttributes.TOOL_CALL_FUNCTION_NAME +TOOL_CALL_FUNCTION_ARGUMENTS_JSON = ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON + +TOOL_JSON_SCHEMA = ToolAttributes.TOOL_JSON_SCHEMA diff --git a/src/phoenix/server/api/types/ChatCompletionMessageRole.py b/src/phoenix/server/api/types/ChatCompletionMessageRole.py new file mode 100644 index 0000000000..be61a677df --- /dev/null +++ b/src/phoenix/server/api/types/ChatCompletionMessageRole.py @@ -0,0 +1,11 @@ +from enum import Enum + +import strawberry + + +@strawberry.enum +class ChatCompletionMessageRole(Enum): + USER = "USER" + SYSTEM = "SYSTEM" + TOOL = "TOOL" + AI = "AI" # E.g. the assistant. Normalize to AI for consistency. diff --git a/src/phoenix/server/api/types/GenerativeProvider.py b/src/phoenix/server/api/types/GenerativeProvider.py new file mode 100644 index 0000000000..a9a41ce5ac --- /dev/null +++ b/src/phoenix/server/api/types/GenerativeProvider.py @@ -0,0 +1,16 @@ +from enum import Enum + +import strawberry + + +@strawberry.enum +class GenerativeProviderKey(Enum): + OPENAI = "OPENAI" + ANTHROPIC = "ANTHROPIC" + AZURE_OPENAI = "AZURE_OPENAI" + + +@strawberry.type +class GenerativeProvider: + name: str + key: GenerativeProviderKey diff --git a/src/phoenix/server/app.py b/src/phoenix/server/app.py index 0807f8668e..f648f1d7a0 100644 --- a/src/phoenix/server/app.py +++ b/src/phoenix/server/app.py @@ -36,19 +36,21 @@ from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker from starlette.datastructures import State as StarletteState -from starlette.exceptions import HTTPException +from starlette.exceptions import HTTPException, WebSocketException from starlette.middleware import Middleware from starlette.middleware.authentication import AuthenticationMiddleware from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint from starlette.requests import Request -from starlette.responses import PlainTextResponse, Response +from starlette.responses import JSONResponse, PlainTextResponse, Response from starlette.staticfiles import StaticFiles from starlette.status import HTTP_401_UNAUTHORIZED from starlette.templating import Jinja2Templates from starlette.types import Scope, StatefulLifespan +from starlette.websockets import WebSocket from strawberry.extensions import SchemaExtension from strawberry.fastapi import GraphQLRouter from strawberry.schema import BaseSchema +from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL from typing_extensions import TypeAlias import phoenix @@ -580,6 +582,7 @@ def get_context() -> Context: include_in_schema=False, 
prefix="/graphql", dependencies=(Depends(is_authenticated),) if authentication_enabled else (), + subscription_protocols=[GRAPHQL_TRANSPORT_WS_PROTOCOL], ) @@ -630,6 +633,29 @@ async def plain_text_http_exception_handler(request: Request, exc: HTTPException return PlainTextResponse(str(exc.detail), status_code=exc.status_code, headers=headers) +async def websocket_denial_response_handler(websocket: WebSocket, exc: WebSocketException) -> None: + """ + Overrides the default exception handler for WebSocketException to ensure + that the HTTP response returned when a WebSocket connection is denied has + the same status code as the raised exception. This is in keeping with the + WebSocket Denial Response Extension of the ASGI specificiation described + below. + + "Websocket connections start with the client sending a HTTP request + containing the appropriate upgrade headers. On receipt of this request a + server can choose to either upgrade the connection or respond with an HTTP + response (denying the upgrade). The core ASGI specification does not allow + for any control over the denial response, instead specifying that the HTTP + status code 403 should be returned, whereas this extension allows an ASGI + framework to control the denial response." + + For details, see: + - https://asgi.readthedocs.io/en/latest/extensions.html#websocket-denial-response + """ + assert isinstance(exc, WebSocketException) + await websocket.send_denial_response(JSONResponse(status_code=exc.code, content=exc.reason)) + + def create_app( db: DbSessionFactory, export_path: Path, @@ -776,7 +802,10 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: scaffolder_config=scaffolder_config, ), middleware=middlewares, - exception_handlers={HTTPException: plain_text_http_exception_handler}, + exception_handlers={ + HTTPException: plain_text_http_exception_handler, + WebSocketException: websocket_denial_response_handler, # type: ignore[dict-item] + }, debug=debug, swagger_ui_parameters={ "defaultModelsExpandDepth": -1, # hides the schema section in the Swagger UI diff --git a/src/phoenix/server/bearer_auth.py b/src/phoenix/server/bearer_auth.py index 3c47171c3e..7e0b1447b6 100644 --- a/src/phoenix/server/bearer_auth.py +++ b/src/phoenix/server/bearer_auth.py @@ -7,10 +7,11 @@ Callable, Optional, Tuple, + cast, ) import grpc -from fastapi import HTTPException, Request +from fastapi import HTTPException, Request, WebSocket, WebSocketException from grpc_interceptor import AsyncServerInterceptor from grpc_interceptor.exceptions import Unauthenticated from starlette.authentication import AuthCredentials, AuthenticationBackend, BaseUser @@ -116,12 +117,19 @@ async def intercept( raise Unauthenticated() -async def is_authenticated(request: Request) -> None: +async def is_authenticated( + # fastapi dependencies require non-optional types + request: Request = cast(Request, None), + websocket: WebSocket = cast(WebSocket, None), +) -> None: """ - Raises a 401 if the request is not authenticated. + Raises a 401 if the request or websocket connection is not authenticated. 
""" - if not isinstance((user := request.user), PhoenixUser): + assert request or websocket + if request and not isinstance((user := request.user), PhoenixUser): raise HTTPException(status_code=HTTP_401_UNAUTHORIZED, detail="Invalid token") + if websocket and not isinstance((user := websocket.user), PhoenixUser): + raise WebSocketException(code=HTTP_401_UNAUTHORIZED, reason="Invalid token") claims = user.claims if claims.status is ClaimSetStatus.EXPIRED: raise HTTPException(status_code=HTTP_401_UNAUTHORIZED, detail="Expired token") diff --git a/src/phoenix/utilities/json.py b/src/phoenix/utilities/json.py index 691a31c8b1..d9c456dcbf 100644 --- a/src/phoenix/utilities/json.py +++ b/src/phoenix/utilities/json.py @@ -5,6 +5,8 @@ from typing import Any, Mapping, Sequence, Union, get_args, get_origin import numpy as np +from strawberry import UNSET +from strawberry.types.base import StrawberryObjectDefinition def jsonify(obj: Any) -> Any: @@ -19,6 +21,15 @@ def jsonify(obj: Any) -> Any: return [jsonify(v) for v in obj] if isinstance(obj, (dict, Mapping)): return {jsonify(k): jsonify(v) for k, v in obj.items()} + is_strawberry_type = isinstance( + getattr(obj, "__strawberry_definition__", None), StrawberryObjectDefinition + ) + if is_strawberry_type: + return { + k: jsonify(v) + for field in dataclasses.fields(obj) + if (v := getattr(obj, (k := field.name))) is not UNSET + } if dataclasses.is_dataclass(obj): return { k: jsonify(v)