diff --git a/js/examples/langchain-express/backend/Dockerfile b/js/examples/langchain-express/backend/Dockerfile
index e04d00815..86acb6256 100644
--- a/js/examples/langchain-express/backend/Dockerfile
+++ b/js/examples/langchain-express/backend/Dockerfile
@@ -5,8 +5,8 @@ FROM base AS deps
 # Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
 RUN apk add --no-cache libc6-compat
 WORKDIR /app
-COPY package*.json ./
-RUN npm ci
+COPY package.json ./
+RUN npm i

 # Rebuild the source code only when needed
 FROM base AS builder
diff --git a/js/examples/langchain-express/backend/package.json b/js/examples/langchain-express/backend/package.json
index 0ef76a6d9..3da0c5546 100644
--- a/js/examples/langchain-express/backend/package.json
+++ b/js/examples/langchain-express/backend/package.json
@@ -10,10 +10,10 @@
     "type:check": "tsc --noEmit"
   },
   "dependencies": {
-    "@arizeai/openinference-instrumentation-langchain": "^0.2.0",
-    "@langchain/community": "^0.2.31",
-    "@langchain/core": "^0.2.30",
-    "@langchain/openai": "^0.2.8",
+    "@arizeai/openinference-instrumentation-langchain": "^1.0.0",
+    "@langchain/community": "^0.3.9",
+    "@langchain/core": "^0.3.15",
+    "@langchain/openai": "^0.3.11",
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/exporter-trace-otlp-http": "^0.46.0",
     "@opentelemetry/exporter-trace-otlp-proto": "^0.46.0",
@@ -26,7 +26,7 @@
     "cors": "^2.8.5",
     "dotenv": "^16.3.1",
     "express": "^4.18.2",
-    "langchain": "^0.2.17"
+    "langchain": "^0.3.4"
   },
   "devDependencies": {
     "@types/cors": "^2.8.16",
diff --git a/js/examples/langchain-express/backend/src/controllers/chat.controller.ts b/js/examples/langchain-express/backend/src/controllers/chat.controller.ts
index 873f640b8..6323faad9 100644
--- a/js/examples/langchain-express/backend/src/controllers/chat.controller.ts
+++ b/js/examples/langchain-express/backend/src/controllers/chat.controller.ts
@@ -46,10 +46,10 @@ export const createChatController =
       llm,
     });

-    if (response.text == null) {
+    if (response.answer == null) {
       throw new Error("No response from the model");
     }
-    res.send(response.text);
+    res.send(response.answer);
     res.end();
   } catch (error) {
     // eslint-disable-next-line no-console
diff --git a/js/examples/langchain-express/backend/src/vector_store/store.ts b/js/examples/langchain-express/backend/src/vector_store/store.ts
index d26ef428a..32ca0ae37 100644
--- a/js/examples/langchain-express/backend/src/vector_store/store.ts
+++ b/js/examples/langchain-express/backend/src/vector_store/store.ts
@@ -2,7 +2,7 @@
 import { OpenAIEmbeddings } from "@langchain/openai";
 import { MemoryVectorStore } from "langchain/vectorstores/memory";
 import "cheerio";
-import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
+import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio";
 import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
 import { Document } from "@langchain/core/documents";
 import { DOCUMENT_URLS, WEB_LOADER_TIMEOUT } from "../constants";
diff --git a/js/examples/langchain-express/frontend/Dockerfile b/js/examples/langchain-express/frontend/Dockerfile
index 912c0b084..f69d0ea58 100644
--- a/js/examples/langchain-express/frontend/Dockerfile
+++ b/js/examples/langchain-express/frontend/Dockerfile
@@ -6,8 +6,8 @@ FROM base AS deps
 # Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
 RUN apk add --no-cache libc6-compat
 WORKDIR /app
-COPY package*.json ./
-RUN npm ci
+COPY package.json ./
+RUN npm i

 # Rebuild the source code only when needed
 FROM base AS builder
diff --git a/js/examples/langchain-express/frontend/package.json b/js/examples/langchain-express/frontend/package.json
index a7e257199..aaecc1288 100644
--- a/js/examples/langchain-express/frontend/package.json
+++ b/js/examples/langchain-express/frontend/package.json
@@ -8,10 +8,10 @@
     "lint": "next lint"
   },
   "dependencies": {
-    "@langchain/openai": "^0.0.25",
+    "@langchain/openai": "^0.3.11",
     "ai": "^2.2.27",
     "dotenv": "^16.3.1",
-    "next": "^14.1.1",
+    "next": "^15.0.1",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
     "supports-color": "^9.4.0"
diff --git a/js/examples/next-openai-telemetry-app/README.md b/js/examples/next-openai-telemetry-app/README.md
index c538bcd97..18e4ba0bc 100644
--- a/js/examples/next-openai-telemetry-app/README.md
+++ b/js/examples/next-openai-telemetry-app/README.md
@@ -23,8 +23,9 @@ To run the example locally you need to:

 1. Set the required OpenAI environment variable as the token value as shown in [the example env file](./.env.local.example), but in a new file called `.env.local`.
 2. To run [Arize-Phoenix](https://github.com/Arize-ai/phoenix) locally, run `docker run -p 6006:6006 -i -t arizephoenix/phoenix`
-3. `npm ci` to install the required dependencies.
-4. `npm run dev` to launch the development server.
+3. `npm i` to install the required dependencies.
+4. Use Node version >=18.18.0; see [nvm](https://github.com/nvm-sh/nvm) for docs on how to install and manage Node versions.
+5. `npm run dev` to launch the development server.

 To view [Arize-Phoenix](https://github.com/Arize-ai/phoenix) and the example app visit:
diff --git a/js/examples/next-openai-telemetry-app/instrumentation.ts b/js/examples/next-openai-telemetry-app/instrumentation.ts
index dc611734a..a4499285a 100644
--- a/js/examples/next-openai-telemetry-app/instrumentation.ts
+++ b/js/examples/next-openai-telemetry-app/instrumentation.ts
@@ -8,12 +8,15 @@ import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
 import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";

 // For troubleshooting, set the log level to DiagLogLevel.DEBUG
+// This is not required and should not be added in a production setting
 diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);

 export function register() {
   registerOTel({
     serviceName: "phoenix-next-app",
     attributes: {
+      // This is not required, but it allows you to send traces to a specific
+      // project in Phoenix
       [SEMRESATTRS_PROJECT_NAME]: "phoenix-next-app",
     },
     spanProcessors: [
@@ -22,12 +25,11 @@ export function register() {
         headers: {
           api_key: process.env["PHOENIX_API_KEY"],
         },
-        url:
-          process.env["PHOENIX_COLLECTOR_ENDPOINT"] ||
-          "https://app.phoenix.arize.com/v1/traces",
+        url: "http://localhost:6006/v1/traces",
       }),
       spanFilter: (span) => {
-        // Only export spans that are OpenInference spans to negate
+        // Only export spans that are OpenInference spans, to remove non-generative spans
+        // This should be removed if you want to export all spans
         return isOpenInferenceSpan(span);
       },
     }),
diff --git a/js/examples/next-openai-telemetry-app/next.config.js b/js/examples/next-openai-telemetry-app/next.config.js
deleted file mode 100644
index b28ecc843..000000000
--- a/js/examples/next-openai-telemetry-app/next.config.js
+++ /dev/null
@@ -1,10 +0,0 @@
-/** @type {import('next').NextConfig} */
-const nextConfig = {
-  output: "standalone",
-};
-
-nextConfig.experimental = {
-  instrumentationHook: true,
-};
-
-module.exports = nextConfig;
diff --git a/js/examples/next-openai-telemetry-app/next.config.ts b/js/examples/next-openai-telemetry-app/next.config.ts
new file mode 100644
index 000000000..568cebaf3
--- /dev/null
+++ b/js/examples/next-openai-telemetry-app/next.config.ts
@@ -0,0 +1,4 @@
+import { NextConfig } from "next";
+const nextConfig: NextConfig = {};
+
+export default nextConfig;
diff --git a/js/packages/openinference-core/README.md b/js/packages/openinference-core/README.md
index 26132ca0d..805787b78 100644
--- a/js/packages/openinference-core/README.md
+++ b/js/packages/openinference-core/README.md
@@ -1,5 +1,110 @@
 # OpenInference Core

-[![npm version](https://badge.fury.io/js/@arizeai%2Fopeninference-instrumentation.svg)](https://badge.fury.io/js/@arizeai%2Fopeninference-core)
+[![npm version](https://badge.fury.io/js/@arizeai%2Fopeninference-core.svg)](https://badge.fury.io/js/@arizeai%2Fopeninference-core)

 This package provides OpenInference Core utilities for LLM Traces.
+
+## Installation
+
+```bash
+npm install @arizeai/openinference-core  # npm
+pnpm add @arizeai/openinference-core     # pnpm
+yarn add @arizeai/openinference-core     # yarn
+```
+
+## Customizing Spans
+
+The `@arizeai/openinference-core` package offers utilities to track important application metadata such as sessions and users using context attribute propagation:
+
+- `setSession`: to specify a session ID to track and group multi-turn conversations
+- `setUser`: to specify a user ID to track different conversations with a given user
+- `setMetadata`: to add custom metadata that can provide extra information to support a wide range of operational needs
+- `setTag`: to add tags to filter spans on specific keywords
+- `setPromptTemplate`: to reflect the prompt template used, with its version and variables. This is useful for prompt template tracking
+- `setAttributes`: to add multiple custom attributes at the same time
+
+> [!NOTE]
+> All `@arizeai/openinference` auto instrumentation packages will pull attributes off of context and add them to spans.
+
+### Examples
+
+`setSession`
+
+```typescript
+import { context } from "@opentelemetry/api";
+import { setSession } from "@arizeai/openinference-core";
+
+context.with(setSession(context.active(), { sessionId: "session-id" }), () => {
+  // Calls within this block will generate spans with the attributes:
+  // "session.id" = "session-id"
+});
+```
+
+Each setter function returns a new active context, so they can be chained together.
+
+```typescript
+import { context } from "@opentelemetry/api";
+import { setAttributes, setSession } from "@arizeai/openinference-core";
+
+context.with(
+  setAttributes(setSession(context.active(), { sessionId: "session-id" }), {
+    myAttribute: "test",
+  }),
+  () => {
+    // Calls within this block will generate spans with the attributes:
+    // "myAttribute" = "test"
+    // "session.id" = "session-id"
+  },
+);
+```
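+
+The other setters follow the same pattern. For example, here is a minimal sketch using `setUser` to group spans by user (the user ID here is a placeholder):
+
+```typescript
+import { context } from "@opentelemetry/api";
+import { setUser } from "@arizeai/openinference-core";
+
+context.with(setUser(context.active(), { userId: "user-id" }), () => {
+  // Calls within this block will generate spans with the attributes:
+  // "user.id" = "user-id"
+});
+```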
+
+Additionally, they can be used in conjunction with the [OpenInference Semantic Conventions](../openinference-semantic-conventions/).
+
+```typescript
+import { context } from "@opentelemetry/api";
+import { setAttributes } from "@arizeai/openinference-core";
+import { SemanticConventions } from "@arizeai/openinference-semantic-conventions";
+
+context.with(
+  setAttributes(context.active(), {
+    [SemanticConventions.SESSION_ID]: "session-id",
+  }),
+  () => {
+    // Calls within this block will generate spans with the attributes:
+    // "session.id" = "session-id"
+  },
+);
+```
+
+If you are creating spans manually and want to propagate context attributes you've set to those spans as well, you can use the `getAttributesFromContext` utility to do so. You can read more about customizing spans in our [docs](https://docs.arize.com/phoenix/tracing/how-to-tracing/customize-spans).
+
+```typescript
+import { getAttributesFromContext } from "@arizeai/openinference-core";
+import { context, trace } from "@opentelemetry/api";
+
+const contextAttributes = getAttributesFromContext(context.active());
+const tracer = trace.getTracer("example");
+const span = tracer.startSpan("example span");
+span.setAttributes(contextAttributes);
+span.end();
+```
+
+## Trace Config
+
+This package also provides support for controlling settings like data privacy and payload sizes. For instance, you may want to keep sensitive information from being logged for security reasons, or you may want to limit the size of the base64 encoded images logged to reduce payload size.
+
+> [!NOTE]
+> These values can also be controlled via environment variables; see more information [here](https://github.com/Arize-ai/openinference/blob/main/spec/configuration.md).
+
+Here is an example of how to configure these settings using the OpenAI auto instrumentation. Note that all of our auto instrumentations will accept a `traceConfig` object.
+
+```typescript
+import { OpenAIInstrumentation } from "@arizeai/openinference-instrumentation-openai";
+
+/**
+ * Everything left out of here will fallback to
+ * environment variables then defaults
+ */
+const traceConfig = { hideInputs: true };
+
+const instrumentation = new OpenAIInstrumentation({ traceConfig });
+```
diff --git a/spec/configuration.md b/spec/configuration.md
index a4bead02c..159041c9f 100644
--- a/spec/configuration.md
+++ b/spec/configuration.md
@@ -18,12 +18,21 @@ The possible settings are:
 | OPENINFERENCE_HIDE_EMBEDDING_VECTORS  | Hides returned embedding vectors                   | bool | False  |
 | OPENINFERENCE_BASE64_IMAGE_MAX_LENGTH | Limits characters of a base64 encoding of an image | int  | 32,000 |

-## Setup a Configuration in Python
+## Usage

-If you are working in Python, and want to set up a configuration different than the default values you can:
-- Create the environment variables with the desired values, or
-- Define the configuration in code as shown below, passing it to the `instrument()` method of your instrumentator (the example below demonstrates using the OpenAiInstrumentator)
-  ```python
+To set up this configuration you can either:
+- Set environment variables as specified above
+- Define the configuration in code as shown below
+- Do nothing and fall back to the default values
+- Use a combination of the three; the order of precedence is:
+  - Values set in the `TraceConfig` in code
+  - Environment variables
+  - Default values
+
+### Python
+
+If you are working in Python and want to set up a configuration different than the default, you can define the configuration in code as shown below, passing it to the `instrument()` method of your instrumentator (the example below demonstrates using the OpenAIInstrumentator):
+```python
 from openinference.instrumentation import TraceConfig

 config = TraceConfig(
     hide_inputs=...,
@@ -42,5 +51,20 @@ If you are working in Python, and want to set up a configuration different than
         tracer_provider=tracer_provider,
         config=config,
     )
-  ```
-- Do nothing and the default values will be used.
+```
+
+### JavaScript
+
+If you are working in JavaScript and want to set up a configuration different than the default, you can define the configuration as shown below and pass it into any OpenInference instrumentation (the example below demonstrates using the OpenAIInstrumentation):
+
+```typescript
+import { OpenAIInstrumentation } from "@arizeai/openinference-instrumentation-openai";
+
+/**
+ * Everything left out of here will fallback to
+ * environment variables then defaults
+ */
+const traceConfig = { hideInputs: true };
+
+const instrumentation = new OpenAIInstrumentation({ traceConfig });
+```
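+
+As a quick illustration of the precedence rules above, here is a sketch that assumes the environment variable `OPENINFERENCE_HIDE_OUTPUTS` is set to `true`:
+
+```typescript
+import { OpenAIInstrumentation } from "@arizeai/openinference-instrumentation-openai";
+
+// hideInputs comes from code and takes precedence over any environment variable,
+// hideOutputs falls back to OPENINFERENCE_HIDE_OUTPUTS (true here), and every
+// other setting uses its default value
+const traceConfig = { hideInputs: false };
+
+const instrumentation = new OpenAIInstrumentation({ traceConfig });
+```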