Initial Commit

This commit is contained in:
2026-03-06 04:54:20 -04:00
commit 63677bfcf5
9332 changed files with 1507319 additions and 0 deletions

1
node_modules/.bin/esbuild generated vendored Symbolic link
View File

@@ -0,0 +1 @@
../esbuild/bin/esbuild

1
node_modules/.bin/nanoid generated vendored Symbolic link
View File

@@ -0,0 +1 @@
../nanoid/bin/nanoid.cjs

1
node_modules/.bin/proto-loader-gen-types generated vendored Symbolic link
View File

@@ -0,0 +1 @@
../@grpc/proto-loader/build/bin/proto-loader-gen-types.js

1
node_modules/.bin/rollup generated vendored Symbolic link
View File

@@ -0,0 +1 @@
../rollup/dist/bin/rollup

1
node_modules/.bin/vite generated vendored Symbolic link
View File

@@ -0,0 +1 @@
../vite/bin/vite.js

1281
node_modules/.package-lock.json generated vendored Normal file

File diff suppressed because it is too large Load Diff

31
node_modules/.vite/deps/_metadata.json generated vendored Normal file
View File

@@ -0,0 +1,31 @@
{
"hash": "318f6333",
"configHash": "bec28cc9",
"lockfileHash": "5b715ce7",
"browserHash": "d7b6ad98",
"optimized": {
"firebase/app": {
"src": "../../firebase/app/dist/esm/index.esm.js",
"file": "firebase_app.js",
"fileHash": "ceccada5",
"needsInterop": false
},
"firebase/auth": {
"src": "../../firebase/auth/dist/esm/index.esm.js",
"file": "firebase_auth.js",
"fileHash": "5b73b330",
"needsInterop": false
},
"firebase/firestore": {
"src": "../../firebase/firestore/dist/esm/index.esm.js",
"file": "firebase_firestore.js",
"fileHash": "00e0b21d",
"needsInterop": false
}
},
"chunks": {
"chunk-2ZX2ZIUA": {
"file": "chunk-2ZX2ZIUA.js"
}
}
}

2373
node_modules/.vite/deps/chunk-2ZX2ZIUA.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

7
node_modules/.vite/deps/chunk-2ZX2ZIUA.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

56
node_modules/.vite/deps/firebase_app.js generated vendored Normal file
View File

@@ -0,0 +1,56 @@
import {
DEFAULT_ENTRY_NAME,
FirebaseError,
SDK_VERSION,
_addComponent,
_addOrOverwriteComponent,
_apps,
_clearComponents,
_components,
_getProvider,
_isFirebaseApp,
_isFirebaseServerApp,
_isFirebaseServerAppSettings,
_registerComponent,
_removeServiceInstance,
_serverApps,
deleteApp,
getApp,
getApps,
initializeApp,
initializeServerApp,
onLog,
registerVersion,
setLogLevel
} from "./chunk-2ZX2ZIUA.js";
// node_modules/firebase/app/dist/esm/index.esm.js
var name = "firebase";
var version = "12.10.0";
registerVersion(name, version, "app");
export {
FirebaseError,
SDK_VERSION,
DEFAULT_ENTRY_NAME as _DEFAULT_ENTRY_NAME,
_addComponent,
_addOrOverwriteComponent,
_apps,
_clearComponents,
_components,
_getProvider,
_isFirebaseApp,
_isFirebaseServerApp,
_isFirebaseServerAppSettings,
_registerComponent,
_removeServiceInstance,
_serverApps,
deleteApp,
getApp,
getApps,
initializeApp,
initializeServerApp,
onLog,
registerVersion,
setLogLevel
};
//# sourceMappingURL=firebase_app.js.map

7
node_modules/.vite/deps/firebase_app.js.map generated vendored Normal file
View File

@@ -0,0 +1,7 @@
{
"version": 3,
"sources": ["../../firebase/app/index.ts"],
"sourcesContent": ["/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport { registerVersion } from '@firebase/app';\nimport { name, version } from '../package.json';\n\nregisterVersion(name, version, 'app');\nexport * from '@firebase/app';\n"],
"mappings": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmBA,gBAAgB,MAAM,SAAS,KAAK;",
"names": []
}

8225
node_modules/.vite/deps/firebase_auth.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

7
node_modules/.vite/deps/firebase_auth.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

18614
node_modules/.vite/deps/firebase_firestore.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

7
node_modules/.vite/deps/firebase_firestore.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

3
node_modules/.vite/deps/package.json generated vendored Normal file
View File

@@ -0,0 +1,3 @@
{
"type": "module"
}

3
node_modules/@esbuild/linux-x64/README.md generated vendored Normal file
View File

@@ -0,0 +1,3 @@
# esbuild
This is the Linux 64-bit binary for esbuild, a JavaScript bundler and minifier. See https://github.com/evanw/esbuild for details.

BIN
node_modules/@esbuild/linux-x64/bin/esbuild generated vendored Executable file

Binary file not shown.

20
node_modules/@esbuild/linux-x64/package.json generated vendored Normal file
View File

@@ -0,0 +1,20 @@
{
"name": "@esbuild/linux-x64",
"version": "0.27.3",
"description": "The Linux 64-bit binary for esbuild, a JavaScript bundler.",
"repository": {
"type": "git",
"url": "git+https://github.com/evanw/esbuild.git"
},
"license": "MIT",
"preferUnplugged": true,
"engines": {
"node": ">=18"
},
"os": [
"linux"
],
"cpu": [
"x64"
]
}

5
node_modules/@firebase/ai/README.md generated vendored Normal file
View File

@@ -0,0 +1,5 @@
# @firebase/ai
This is the Firebase AI component of the Firebase JS SDK.
**This package is not intended for direct usage, and should only be used via the officially supported [firebase](https://www.npmjs.com/package/firebase) package.**

3472
node_modules/@firebase/ai/dist/ai-public.d.ts generated vendored Normal file

File diff suppressed because it is too large Load Diff

3712
node_modules/@firebase/ai/dist/ai.d.ts generated vendored Normal file

File diff suppressed because it is too large Load Diff

4765
node_modules/@firebase/ai/dist/esm/index.esm.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

1
node_modules/@firebase/ai/dist/esm/index.esm.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

1
node_modules/@firebase/ai/dist/esm/package.json generated vendored Normal file
View File

@@ -0,0 +1 @@
{"type":"module"}

121
node_modules/@firebase/ai/dist/esm/src/api.d.ts generated vendored Normal file
View File

@@ -0,0 +1,121 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseApp } from '@firebase/app';
import { AI_TYPE } from './constants';
import { AIService } from './service';
import { AI, AIOptions } from './public-types';
import { ImagenModelParams, HybridParams, ModelParams, RequestOptions, LiveModelParams } from './types';
import { AIError } from './errors';
import { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel } from './models';
import { TemplateGenerativeModel } from './models/template-generative-model';
import { TemplateImagenModel } from './models/template-imagen-model';
export { ChatSession } from './methods/chat-session';
export { LiveSession } from './methods/live-session';
export * from './requests/schema-builder';
export { ImagenImageFormat } from './requests/imagen-image-format';
export { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, TemplateGenerativeModel, TemplateImagenModel, AIError };
export { Backend, VertexAIBackend, GoogleAIBackend } from './backend';
export { startAudioConversation, AudioConversationController, StartAudioConversationOptions } from './methods/live-session-helpers';
declare module '@firebase/component' {
interface NameServiceMapping {
[AI_TYPE]: AIService;
}
}
/**
* Returns the default {@link AI} instance that is associated with the provided
* {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
* default settings.
*
* @example
* ```javascript
* const ai = getAI(app);
* ```
*
* @example
* ```javascript
* // Get an AI instance configured to use the Gemini Developer API (via Google AI).
* const ai = getAI(app, { backend: new GoogleAIBackend() });
* ```
*
* @example
* ```javascript
* // Get an AI instance configured to use the Vertex AI Gemini API.
* const ai = getAI(app, { backend: new VertexAIBackend() });
* ```
*
* @param app - The {@link @firebase/app#FirebaseApp} to use.
* @param options - {@link AIOptions} that configure the AI instance.
* @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
*
* @public
*/
export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
/**
* Returns a {@link GenerativeModel} class with methods for inference
* and other functionality.
*
* @public
*/
export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
/**
* Returns an {@link ImagenModel} class with methods for using Imagen.
*
* Only Imagen 3 models (named `imagen-3.0-*`) are supported.
*
* @param ai - An {@link AI} instance.
* @param modelParams - Parameters to use when making Imagen requests.
* @param requestOptions - Additional options to use when making requests.
*
* @throws If the `apiKey` or `projectId` fields are missing in your
* Firebase config.
*
* @public
*/
export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
/**
* Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
*
* The Live API is only supported in modern browser windows and Node >= 22.
*
* @param ai - An {@link AI} instance.
* @param modelParams - Parameters to use when setting up a {@link LiveSession}.
* @throws If the `apiKey` or `projectId` fields are missing in your
* Firebase config.
*
* @beta
*/
export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel;
/**
* Returns a {@link TemplateGenerativeModel} class for executing server-side
* templates.
*
* @param ai - An {@link AI} instance.
* @param requestOptions - Additional options to use when making requests.
*
* @beta
*/
export declare function getTemplateGenerativeModel(ai: AI, requestOptions?: RequestOptions): TemplateGenerativeModel;
/**
* Returns a {@link TemplateImagenModel} class for executing server-side
* Imagen templates.
*
* @param ai - An {@link AI} instance.
* @param requestOptions - Additional options to use when making requests.
*
* @beta
*/
export declare function getTemplateImagenModel(ai: AI, requestOptions?: RequestOptions): TemplateImagenModel;

98
node_modules/@firebase/ai/dist/esm/src/backend.d.ts generated vendored Normal file
View File

@@ -0,0 +1,98 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BackendType } from './public-types';
/**
* Abstract base class representing the configuration for an AI service backend.
* This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
* the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
* {@link VertexAIBackend} for the Vertex AI Gemini API.
*
* @public
*/
export declare abstract class Backend {
/**
* Specifies the backend type.
*/
readonly backendType: BackendType;
/**
* Protected constructor for use by subclasses.
* @param type - The backend type.
*/
protected constructor(type: BackendType);
/**
* @internal
*/
abstract _getModelPath(project: string, model: string): string;
/**
* @internal
*/
abstract _getTemplatePath(project: string, templateId: string): string;
}
/**
* Configuration class for the Gemini Developer API.
*
* Use this with {@link AIOptions} when initializing the AI service via
* {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
*
* @public
*/
export declare class GoogleAIBackend extends Backend {
/**
* Creates a configuration object for the Gemini Developer API backend.
*/
constructor();
/**
* @internal
*/
_getModelPath(project: string, model: string): string;
/**
* @internal
*/
_getTemplatePath(project: string, templateId: string): string;
}
/**
* Configuration class for the Vertex AI Gemini API.
*
* Use this with {@link AIOptions} when initializing the AI service via
* {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
*
* @public
*/
export declare class VertexAIBackend extends Backend {
/**
* The region identifier.
* See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
* for a list of supported locations.
*/
readonly location: string;
/**
* Creates a configuration object for the Vertex AI backend.
*
* @param location - The region identifier, defaulting to `us-central1`;
* see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
* for a list of supported locations.
*/
constructor(location?: string);
/**
* @internal
*/
_getModelPath(project: string, model: string): string;
/**
* @internal
*/
_getTemplatePath(project: string, templateId: string): string;
}

29
node_modules/@firebase/ai/dist/esm/src/constants.d.ts generated vendored Normal file
View File

@@ -0,0 +1,29 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare const AI_TYPE = "AI";
export declare const DEFAULT_LOCATION = "us-central1";
export declare const DEFAULT_DOMAIN = "firebasevertexai.googleapis.com";
export declare const STAGING_URL = "https://staging-firebasevertexai.sandbox.googleapis.com";
export declare const DEFAULT_API_VERSION = "v1beta";
export declare const PACKAGE_VERSION: string;
export declare const LANGUAGE_TAG = "gl-js";
export declare const HYBRID_TAG = "hybrid";
export declare const DEFAULT_FETCH_TIMEOUT_MS: number;
/**
* Defines the name of the default in-cloud model to use for hybrid inference.
*/
export declare const DEFAULT_HYBRID_IN_CLOUD_MODEL = "gemini-2.5-flash-lite";

35
node_modules/@firebase/ai/dist/esm/src/errors.d.ts generated vendored Normal file
View File

@@ -0,0 +1,35 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseError } from '@firebase/util';
import { AIErrorCode, CustomErrorData } from './types';
/**
* Error class for the Firebase AI SDK.
*
* @public
*/
export declare class AIError extends FirebaseError {
readonly code: AIErrorCode;
readonly customErrorData?: CustomErrorData | undefined;
/**
* Constructs a new instance of the `AIError` class.
*
* @param code - The error code from {@link (AIErrorCode:type)}.
* @param message - A human-readable message describing the error.
* @param customErrorData - Optional error data.
*/
constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined);
}

View File

@@ -0,0 +1,19 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ComponentContainer, InstanceFactoryOptions } from '@firebase/component';
import { AIService } from './service';
export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;

View File

@@ -0,0 +1,19 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ComponentContainer, InstanceFactoryOptions } from '@firebase/component';
import { AIService } from './service';
export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;

View File

@@ -0,0 +1,73 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CountTokensRequest, GenerateContentCandidate, GenerateContentRequest, GenerateContentResponse, PromptFeedback } from './types';
import { GoogleAIGenerateContentResponse, GoogleAIGenerateContentCandidate, GoogleAICountTokensRequest } from './types/googleai';
/**
* This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).
* The public API prioritizes the format used by the Vertex AI Gemini API.
* We avoid having two sets of types by translating requests and responses between the two API formats.
* This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
* with minimal code changes.
*
* In here are functions that map requests and responses between the two API formats.
* Requests in the Vertex AI format are mapped to the Google AI format before being sent.
* Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
*/
/**
* Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
*
* @param generateContentRequest The {@link GenerateContentRequest} to map.
* @returns A {@link GenerateContentResponse} that conforms to the Google AI format.
*
* @throws If the request contains properties that are unsupported by Google AI.
*
* @internal
*/
export declare function mapGenerateContentRequest(generateContentRequest: GenerateContentRequest): GenerateContentRequest;
/**
* Maps a {@link GenerateContentResponse} from Google AI to the format of the
* {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.
*
* @param googleAIResponse The {@link GenerateContentResponse} from Google AI.
* @returns A {@link GenerateContentResponse} that conforms to the public API's format.
*
* @internal
*/
export declare function mapGenerateContentResponse(googleAIResponse: GoogleAIGenerateContentResponse): GenerateContentResponse;
/**
* Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.
*
* @param countTokensRequest The {@link CountTokensRequest} to map.
* @param model The model to count tokens with.
* @returns A {@link CountTokensRequest} that conforms to the Google AI format.
*
* @internal
*/
export declare function mapCountTokensRequest(countTokensRequest: CountTokensRequest, model: string): GoogleAICountTokensRequest;
/**
* Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms
* to the Vertex AI API format.
*
* @param candidates The {@link GoogleAIGenerateContentCandidate} to map.
* @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.
*
* @throws If any {@link Part} in the candidates has a `videoMetadata` property.
*
* @internal
*/
export declare function mapGenerateContentCandidates(candidates: GoogleAIGenerateContentCandidate[]): GenerateContentCandidate[];
export declare function mapPromptFeedback(promptFeedback: PromptFeedback): PromptFeedback;

30
node_modules/@firebase/ai/dist/esm/src/helpers.d.ts generated vendored Normal file
View File

@@ -0,0 +1,30 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Backend } from './backend';
/**
* Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}
* instances by backend type.
*
* @internal
*/
export declare function encodeInstanceIdentifier(backend: Backend): string;
/**
* Decodes an instance identifier string into a {@link Backend}.
*
* @internal
*/
export declare function decodeInstanceIdentifier(instanceIdentifier: string): Backend;

13
node_modules/@firebase/ai/dist/esm/src/index.d.ts generated vendored Normal file
View File

@@ -0,0 +1,13 @@
/**
* The Firebase AI Web SDK.
*
* @packageDocumentation
*/
import { LanguageModel } from './types/language-model';
declare global {
interface Window {
LanguageModel: LanguageModel;
}
}
export * from './api';
export * from './public-types';

View File

@@ -0,0 +1,7 @@
/**
* The Firebase AI Web SDK.
*
* @packageDocumentation
*/
export * from './api';
export * from './public-types';

18
node_modules/@firebase/ai/dist/esm/src/logger.d.ts generated vendored Normal file
View File

@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Logger } from '@firebase/logger';
export declare const logger: Logger;

View File

@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content } from '../types';
export declare function validateChatHistory(history: Content[]): void;

View File

@@ -0,0 +1,77 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content, FunctionCall, FunctionResponsePart, GenerateContentRequest, GenerateContentResponse, GenerateContentResult, GenerateContentStreamResult, Part, RequestOptions, SingleRequestOptions, StartChatParams } from '../types';
import { ApiSettings } from '../types/internal';
import { ChromeAdapter } from '../types/chrome-adapter';
/**
* ChatSession class that enables sending chat messages and stores
* history of sent and received messages so far.
*
* @public
*/
export declare class ChatSession {
model: string;
private chromeAdapter?;
params?: StartChatParams | undefined;
requestOptions?: RequestOptions | undefined;
private _apiSettings;
private _history;
/**
* Ensures sequential execution of chat messages to maintain history order.
* Each call waits for the previous one to settle before proceeding.
*/
private _sendPromise;
constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
/**
* Gets the chat history so far. Blocked prompts are not added to history.
* Neither blocked candidates nor the prompts that generated them are added
* to history.
*/
getHistory(): Promise<Content[]>;
/**
* Format Content into a request for generateContent or
* generateContentStream.
* @internal
*/
_formatRequest(incomingContent: Content, tempHistory: Content[]): GenerateContentRequest;
/**
* Sends a chat message and receives a non-streaming
* {@link GenerateContentResult}
*/
sendMessage(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
/**
* Sends a chat message and receives the response as a
* {@link GenerateContentStreamResult} containing an iterable stream
* and a response promise.
*/
sendMessageStream(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
/**
* Get function calls that the SDK has references to actually call.
* This is all-or-nothing. If the model is requesting multiple
* function calls, all of them must have references in order for
* automatic function calling to work.
*
* @internal
*/
_getCallableFunctionCalls(response?: GenerateContentResponse): FunctionCall[] | undefined;
/**
* Call user-defined functions if requested by the model, and return
* the response that should be sent to the model.
* @internal
*/
_callFunctionsAsNeeded(functionCalls: FunctionCall[]): Promise<FunctionResponsePart[]>;
}

View File

@@ -0,0 +1,124 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CountTokensRequest, GenerateContentRequest, InferenceMode, OnDeviceParams } from '../types';
import { ChromeAdapter } from '../types/chrome-adapter';
import { LanguageModel } from '../types/language-model';
/**
* Defines an inference "backend" that uses Chrome's on-device model,
* and encapsulates logic for detecting when on-device inference is
* possible.
*/
export declare class ChromeAdapterImpl implements ChromeAdapter {
languageModelProvider: LanguageModel;
mode: InferenceMode;
static SUPPORTED_MIME_TYPES: string[];
private isDownloading;
private downloadPromise;
private oldSession;
onDeviceParams: OnDeviceParams;
constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
/**
* Checks if a given request can be made on-device.
*
* Encapsulates a few concerns:
* the mode
* API existence
* prompt formatting
* model availability, including triggering download if necessary
*
*
* Pros: callers needn't be concerned with details of on-device availability.</p>
* Cons: this method spans a few concerns and splits request validation from usage.
* If instance variables weren't already part of the API, we could consider a better
* separation of concerns.
*/
isAvailable(request: GenerateContentRequest): Promise<boolean>;
/**
* Generates content on device.
*
* @remarks
* This is comparable to {@link GenerativeModel.generateContent} for generating content in
* Cloud.
* @param request - a standard Firebase AI {@link GenerateContentRequest}
* @returns {@link Response}, so we can reuse common response formatting.
*/
generateContent(request: GenerateContentRequest): Promise<Response>;
/**
* Generates content stream on device.
*
* @remarks
* This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
* Cloud.
* @param request - a standard Firebase AI {@link GenerateContentRequest}
* @returns {@link Response}, so we can reuse common response formatting.
*/
generateContentStream(request: GenerateContentRequest): Promise<Response>;
countTokens(_request: CountTokensRequest): Promise<Response>;
/**
* Asserts inference for the given request can be performed by an on-device model.
*/
private static isOnDeviceRequest;
/**
* Encapsulates logic to get availability and download a model if one is downloadable.
*/
private downloadIfAvailable;
/**
* Triggers out-of-band download of an on-device model.
*
* Chrome only downloads models as needed. Chrome knows a model is needed when code calls
* LanguageModel.create.
*
* Since Chrome manages the download, the SDK can only avoid redundant download requests by
* tracking if a download has previously been requested.
*/
private download;
/**
* Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
*/
private static toLanguageModelMessage;
/**
* Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
*/
private static toLanguageModelMessageContent;
/**
* Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
*/
private static toLanguageModelMessageRole;
/**
* Abstracts Chrome session creation.
*
* Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
* inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
* inference.
*
* Chrome will remove a model from memory if it's no longer in use, so this method ensures a
* new session is created before an old session is destroyed.
*/
private createSession;
/**
* Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
*/
private static toResponse;
/**
* Formats string stream returned by Chrome as SSE returned by Firebase AI.
*/
private static toStreamResponse;
}
/**
* Creates a ChromeAdapterImpl on demand.
*/
export declare function chromeAdapterFactory(mode: InferenceMode, window?: Window, params?: OnDeviceParams): ChromeAdapterImpl | undefined;

View File

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CountTokensRequest, CountTokensResponse, SingleRequestOptions, RequestOptions } from '../types';
import { ApiSettings } from '../types/internal';
import { ChromeAdapter } from '../types/chrome-adapter';
export declare function countTokensOnCloud(apiSettings: ApiSettings, model: string, params: CountTokensRequest, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
export declare function countTokens(apiSettings: ApiSettings, model: string, params: CountTokensRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<CountTokensResponse>;

View File

@@ -0,0 +1,25 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GenerateContentRequest, GenerateContentResponse, GenerateContentResult, GenerateContentStreamResult, SingleRequestOptions } from '../types';
import { ApiSettings } from '../types/internal';
import { ChromeAdapter } from '../types/chrome-adapter';
export declare function generateContentStream(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult & {
firstValue?: GenerateContentResponse;
}>;
export declare function templateGenerateContent(apiSettings: ApiSettings, templateId: string, templateParams: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
export declare function templateGenerateContentStream(apiSettings: ApiSettings, templateId: string, templateParams: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
export declare function generateContent(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;

View File

@@ -0,0 +1,154 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FunctionCall, FunctionResponse } from '../types';
import { LiveSession } from './live-session';
/**
* A controller for managing an active audio conversation.
*
* @beta
*/
export interface AudioConversationController {
/**
* Stops the audio conversation, closes the microphone connection, and
* cleans up resources. Returns a promise that resolves when cleanup is complete.
*/
stop: () => Promise<void>;
}
/**
* Options for {@link startAudioConversation}.
*
* @beta
*/
export interface StartAudioConversationOptions {
/**
* An async handler that is called when the model requests a function to be executed.
* The handler should perform the function call and return the result as a `Part`,
* which will then be sent back to the model.
*/
functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>;
}
/**
* Dependencies needed by the {@link AudioConversationRunner}.
*
* @internal
*/
interface RunnerDependencies {
audioContext: AudioContext;
mediaStream: MediaStream;
sourceNode: MediaStreamAudioSourceNode;
workletNode: AudioWorkletNode;
}
/**
* Encapsulates the core logic of an audio conversation.
*
* @internal
*/
export declare class AudioConversationRunner {
private readonly liveSession;
private readonly options;
private readonly deps;
/** A flag to indicate if the conversation has been stopped. */
private isStopped;
/** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */
private readonly stopDeferred;
/** A promise that tracks the lifecycle of the main `runReceiveLoop`. */
private readonly receiveLoopPromise;
/** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */
private readonly playbackQueue;
/** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */
private scheduledSources;
/** A high-precision timeline pointer for scheduling gapless audio playback. */
private nextStartTime;
/** A mutex to prevent the playback processing loop from running multiple times concurrently. */
private isPlaybackLoopRunning;
constructor(liveSession: LiveSession, options: StartAudioConversationOptions, deps: RunnerDependencies);
/**
* Stops the conversation and unblocks the main receive loop.
*/
stop(): Promise<void>;
/**
* Cleans up all audio resources (nodes, stream tracks, context) and marks the
* session as no longer in a conversation.
*/
private cleanup;
/**
* Adds audio data to the queue and ensures the playback loop is running.
*/
private enqueueAndPlay;
/**
* Stops all current and pending audio playback and clears the queue. This is
* called when the server indicates the model's speech was interrupted with
* `LiveServerContent.modelTurn.interrupted`.
*/
private interruptPlayback;
/**
* Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.
*/
private processPlaybackQueue;
/**
* The main loop that listens for and processes messages from the server.
*/
private runReceiveLoop;
}
/**
* Starts a real-time, bidirectional audio conversation with the model. This helper function manages
* the complexities of microphone access, audio recording, playback, and interruptions.
*
* @remarks Important: This function must be called in response to a user gesture
* (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
*
* @example
* ```javascript
* const liveSession = await model.connect();
* let conversationController;
*
* // This function must be called from within a click handler.
* async function startConversation() {
* try {
* conversationController = await startAudioConversation(liveSession);
* } catch (e) {
* // Handle AI-specific errors
* if (e instanceof AIError) {
* console.error("AI Error:", e.message);
* }
* // Handle microphone permission and hardware errors
* else if (e instanceof DOMException) {
* console.error("Microphone Error:", e.message);
* }
* // Handle other unexpected errors
* else {
* console.error("An unexpected error occurred:", e);
* }
* }
* }
*
* // Later, to stop the conversation:
* // if (conversationController) {
* // await conversationController.stop();
* // }
* ```
*
* @param liveSession - An active {@link LiveSession} instance.
* @param options - Configuration options for the audio conversation.
* @returns A `Promise` that resolves with an {@link AudioConversationController}.
* @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
* @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
*
* @beta
*/
export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>;
export {};

View File

@@ -0,0 +1,154 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FunctionResponse, GenerativeContentBlob, LiveServerContent, LiveServerGoingAwayNotice, LiveServerToolCall, LiveServerToolCallCancellation, Part } from '../public-types';
import { WebSocketHandler } from '../websocket';
/**
* Represents an active, real-time, bidirectional conversation with the model.
*
* This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
*
* @beta
*/
export declare class LiveSession {
private webSocketHandler;
private serverMessages;
/**
* Indicates whether this Live session is closed.
*
* @beta
*/
isClosed: boolean;
/**
* Indicates whether this Live session is being controlled by an `AudioConversationController`.
*
* @beta
*/
inConversation: boolean;
/**
* @internal
*/
constructor(webSocketHandler: WebSocketHandler, serverMessages: AsyncGenerator<unknown>);
/**
* Sends content to the server.
*
* @param request - The message to send to the model.
* @param turnComplete - Indicates if the turn is complete. Defaults to false.
* @throws If this session has been closed.
*
* @beta
*/
send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>;
/**
* Sends text to the server in realtime.
*
* @example
* ```javascript
* liveSession.sendTextRealtime("Hello, how are you?");
* ```
*
* @param text - The text data to send.
* @throws If this session has been closed.
*
* @beta
*/
sendTextRealtime(text: string): Promise<void>;
/**
* Sends audio data to the server in realtime.
*
* @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
* little-endian.
*
* @example
* ```javascript
* // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
* const blob = { mimeType: "audio/pcm", data: pcmData };
* liveSession.sendAudioRealtime(blob);
* ```
*
* @param blob - The base64-encoded PCM data to send to the server in realtime.
* @throws If this session has been closed.
*
* @beta
*/
sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>;
/**
* Sends video data to the server in realtime.
*
* @remarks The server requires that the video is sent as individual video frames at 1 FPS. It
* is recommended to set `mimeType` to `image/jpeg`.
*
* @example
* ```javascript
* // const videoFrame = ... base64-encoded JPEG data
* const blob = { mimeType: "image/jpeg", data: videoFrame };
* liveSession.sendVideoRealtime(blob);
* ```
* @param blob - The base64-encoded video data to send to the server in realtime.
* @throws If this session has been closed.
*
* @beta
*/
sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>;
/**
* Sends function responses to the server.
*
* @param functionResponses - The function responses to send.
* @throws If this session has been closed.
*
* @beta
*/
sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>;
/**
* Yields messages received from the server.
* This can only be used by one consumer at a time.
*
* @returns An `AsyncGenerator` that yields server messages as they arrive.
* @throws If the session is already closed, or if we receive a response that we don't support.
*
* @beta
*/
receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation | LiveServerGoingAwayNotice>;
/**
* Closes this session.
* All methods on this session will throw an error once this resolves.
*
* @beta
*/
close(): Promise<void>;
/**
* Sends realtime input to the server.
*
* @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
*
* @param mediaChunks - The media chunks to send.
* @throws If this session has been closed.
*
* @beta
*/
sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>;
/**
* @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
*
* Sends a stream of {@link GenerativeContentBlob}.
*
* @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
* @throws If this session has been closed.
*
* @beta
*/
sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>;
}

View File

@@ -0,0 +1,72 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AI, BackendType } from '../public-types';
import { ApiSettings } from '../types/internal';
/**
* Base class for Firebase AI model APIs.
*
* Instances of this class are associated with a specific Firebase AI {@link Backend}
* and provide methods for interacting with the configured generative model.
*
* @public
*/
export declare abstract class AIModel {
/**
* The fully qualified model resource name to use for generating images
* (for example, `publishers/google/models/imagen-3.0-generate-002`).
*/
readonly model: string;
/**
* @internal
*/
_apiSettings: ApiSettings;
/**
* Constructs a new instance of the {@link AIModel} class.
*
* This constructor should only be called from subclasses that provide
* a model API.
*
* @param ai - an {@link AI} instance.
* @param modelName - The name of the model being used. It can be in one of the following formats:
* - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
* - `models/my-model` (will resolve to `publishers/google/models/my-model`)
* - `publishers/my-publisher/models/my-model` (fully qualified model name)
*
* @throws If the `apiKey` or `projectId` fields are missing in your
* Firebase config.
*
* @internal
*/
protected constructor(ai: AI, modelName: string);
/**
* Normalizes the given model name to a fully qualified model resource name.
*
* @param modelName - The model name to normalize.
* @returns The fully qualified model resource name.
*
* @internal
*/
static normalizeModelName(modelName: string, backendType: BackendType): string;
/**
* @internal
*/
private static normalizeGoogleAIModelName;
/**
* @internal
*/
private static normalizeVertexAIModelName;
}

View File

@@ -0,0 +1,56 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content, CountTokensRequest, CountTokensResponse, GenerateContentRequest, GenerateContentResult, GenerateContentStreamResult, GenerationConfig, ModelParams, Part, SafetySetting, RequestOptions, StartChatParams, Tool, ToolConfig, SingleRequestOptions } from '../types';
import { ChatSession } from '../methods/chat-session';
import { AI } from '../public-types';
import { AIModel } from './ai-model';
import { ChromeAdapter } from '../types/chrome-adapter';
/**
* Class for generative model APIs.
* @public
*/
export declare class GenerativeModel extends AIModel {
private chromeAdapter?;
generationConfig: GenerationConfig;
safetySettings: SafetySetting[];
requestOptions?: RequestOptions;
tools?: Tool[];
toolConfig?: ToolConfig;
systemInstruction?: Content;
constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined);
/**
* Makes a single non-streaming call to the model
* and returns an object containing a single {@link GenerateContentResponse}.
*/
generateContent(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
/**
* Makes a single streaming call to the model
* and returns an object containing an iterable stream that iterates
* over all chunks in the streaming response as well as
* a promise that returns the final aggregated response.
*/
generateContentStream(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
/**
* Gets a new {@link ChatSession} instance which can be used for
* multi-turn chats.
*/
startChat(startChatParams?: StartChatParams): ChatSession;
/**
* Counts the tokens in the provided request.
*/
countTokens(request: CountTokensRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
}

View File

@@ -0,0 +1,102 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AI } from '../public-types';
import { ImagenGCSImage, ImagenGenerationConfig, ImagenInlineImage, RequestOptions, ImagenModelParams, ImagenGenerationResponse, ImagenSafetySettings, SingleRequestOptions } from '../types';
import { AIModel } from './ai-model';
/**
* Class for Imagen model APIs.
*
* This class provides methods for generating images using the Imagen model.
*
* @example
* ```javascript
* const imagen = new ImagenModel(
* ai,
* {
* model: 'imagen-3.0-generate-002'
* }
* );
*
* const response = await imagen.generateImages('A photo of a cat');
* if (response.images.length > 0) {
* console.log(response.images[0].bytesBase64Encoded);
* }
* ```
*
* @public
*/
export declare class ImagenModel extends AIModel {
requestOptions?: RequestOptions | undefined;
/**
* The Imagen generation configuration.
*/
generationConfig?: ImagenGenerationConfig;
/**
* Safety settings for filtering inappropriate content.
*/
safetySettings?: ImagenSafetySettings;
/**
* Constructs a new instance of the {@link ImagenModel} class.
*
* @param ai - an {@link AI} instance.
* @param modelParams - Parameters to use when making requests to Imagen.
* @param requestOptions - Additional options to use when making requests.
*
* @throws If the `apiKey` or `projectId` fields are missing in your
* Firebase config.
*/
constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined);
/**
* Generates images using the Imagen model and returns them as
* base64-encoded strings.
*
* @param prompt - A text prompt describing the image(s) to generate.
* @returns A promise that resolves to an {@link ImagenGenerationResponse}
* object containing the generated images.
*
* @throws If the request to generate images fails. This happens if the
* prompt is blocked.
*
* @remarks
* If the prompt was not blocked, but one or more of the generated images were filtered, the
* returned object will have a `filteredReason` property.
* If all images are filtered, the `images` array will be empty.
*
* @public
*/
generateImages(prompt: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
/**
* Generates images to Cloud Storage for Firebase using the Imagen model.
*
* @internal This method is temporarily internal.
*
* @param prompt - A text prompt describing the image(s) to generate.
* @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.
* This should be a directory. For example, `gs://my-bucket/my-directory/`.
* @returns A promise that resolves to an {@link ImagenGenerationResponse}
* object containing the URLs of the generated images.
*
* @throws If the request fails to generate images fails. This happens if
* the prompt is blocked.
*
* @remarks
* If the prompt was not blocked, but one or more of the generated images were filtered, the
* returned object will have a `filteredReason` property.
* If all images are filtered, the `images` array will be empty.
*/
generateImagesGCS(prompt: string, gcsURI: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenGCSImage>>;
}

View File

@@ -0,0 +1,20 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export * from './ai-model';
export * from './generative-model';
export * from './live-generative-model';
export * from './imagen-model';

View File

@@ -0,0 +1,55 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AIModel } from './ai-model';
import { LiveSession } from '../methods/live-session';
import { AI, Content, LiveGenerationConfig, LiveModelParams, Tool, ToolConfig } from '../public-types';
import { WebSocketHandler } from '../websocket';
/**
* Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
* interactions with Gemini.
*
* This class should only be instantiated with {@link getLiveGenerativeModel}.
*
* @beta
*/
export declare class LiveGenerativeModel extends AIModel {
/**
* @internal
*/
private _webSocketHandler;
generationConfig: LiveGenerationConfig;
tools?: Tool[];
toolConfig?: ToolConfig;
systemInstruction?: Content;
/**
* @internal
*/
constructor(ai: AI, modelParams: LiveModelParams,
/**
* @internal
*/
_webSocketHandler: WebSocketHandler);
/**
* Starts a {@link LiveSession}.
*
* @returns A {@link LiveSession}.
* @throws If the connection failed to be established with the server.
*
* @beta
*/
connect(): Promise<LiveSession>;
}

View File

@@ -0,0 +1,64 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GenerateContentResult, RequestOptions } from '../types';
import { AI, GenerateContentStreamResult, SingleRequestOptions } from '../public-types';
import { ApiSettings } from '../types/internal';
/**
* {@link GenerativeModel} APIs that execute on a server-side template.
*
* This class should only be instantiated with {@link getTemplateGenerativeModel}.
*
* @beta
*/
export declare class TemplateGenerativeModel {
/**
* @internal
*/
_apiSettings: ApiSettings;
/**
* Additional options to use when making requests.
*/
requestOptions?: RequestOptions;
/**
* @hideconstructor
*/
constructor(ai: AI, requestOptions?: RequestOptions);
/**
* Makes a single non-streaming call to the model and returns an object
* containing a single {@link GenerateContentResponse}.
*
* @param templateId - The ID of the server-side template to execute.
* @param templateVariables - A key-value map of variables to populate the
* template with.
*
* @beta
*/
generateContent(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
/**
* Makes a single streaming call to the model and returns an object
* containing an iterable stream that iterates over all chunks in the
* streaming response as well as a promise that returns the final aggregated
* response.
*
* @param templateId - The ID of the server-side template to execute.
* @param templateVariables - A key-value map of variables to populate the
* template with.
*
* @beta
*/
generateContentStream(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
}

View File

@@ -0,0 +1,51 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { RequestOptions } from '../types';
import { AI, ImagenGenerationResponse, ImagenInlineImage, SingleRequestOptions } from '../public-types';
import { ApiSettings } from '../types/internal';
/**
* Class for Imagen model APIs that execute on a server-side template.
*
* This class should only be instantiated with {@link getTemplateImagenModel}.
*
* @beta
*/
export declare class TemplateImagenModel {
/**
* @internal
*/
_apiSettings: ApiSettings;
/**
* Additional options to use when making requests.
*/
requestOptions?: RequestOptions;
/**
* @hideconstructor
*/
constructor(ai: AI, requestOptions?: RequestOptions);
/**
* Makes a single call to the model and returns an object containing a single
* {@link ImagenGenerationResponse}.
*
* @param templateId - The ID of the server-side template to execute.
* @param templateVariables - A key-value map of variables to populate the
* template with.
*
* @beta
*/
generateImages(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
}

View File

@@ -0,0 +1,26 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AI } from '../public-types';
import { ApiSettings } from '../types/internal';
/**
* Initializes an {@link ApiSettings} object from an {@link AI} instance.
*
* If this is a Server App, the {@link ApiSettings} object's `getAppCheckToken()` will resolve
* with the `FirebaseServerAppSettings.appCheckToken`, instead of requiring that an App Check
* instance is initialized.
*/
export declare function initApiSettings(ai: AI): ApiSettings;

View File

@@ -0,0 +1,97 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseApp } from '@firebase/app';
import { Backend } from './backend';
export * from './types';
/**
* An instance of the Firebase AI SDK.
*
* Do not create this instance directly. Instead, use {@link getAI | getAI()}.
*
* @public
*/
export interface AI {
/**
* The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.
*/
app: FirebaseApp;
/**
* A {@link Backend} instance that specifies the configuration for the target backend,
* either the Gemini Developer API (using {@link GoogleAIBackend}) or the
* Vertex AI Gemini API (using {@link VertexAIBackend}).
*/
backend: Backend;
/**
* Options applied to this {@link AI} instance.
*/
options?: AIOptions;
/**
* @deprecated use `AI.backend.location` instead.
*
* The location configured for this AI service instance, relevant for Vertex AI backends.
*/
location: string;
}
/**
* An enum-like object containing constants that represent the supported backends
* for the Firebase AI SDK.
* This determines which backend service (Vertex AI Gemini API or Gemini Developer API)
* the SDK will communicate with.
*
* These values are assigned to the `backendType` property within the specific backend
* configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify
* which service to target.
*
* @public
*/
export declare const BackendType: {
/**
* Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.
* Use this constant when creating a {@link VertexAIBackend} configuration.
*/
readonly VERTEX_AI: "VERTEX_AI";
/**
* Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).
* Use this constant when creating a {@link GoogleAIBackend} configuration.
*/
readonly GOOGLE_AI: "GOOGLE_AI";
};
/**
* Type alias representing valid backend types.
* It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.
*
* @public
*/
export type BackendType = (typeof BackendType)[keyof typeof BackendType];
/**
* Options for initializing the AI service using {@link getAI | getAI()}.
* This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)
* and configuring its specific options (like location for Vertex AI).
*
* @public
*/
export interface AIOptions {
/**
* The backend configuration to use for the AI service instance.
* Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
*/
backend?: Backend;
/**
* Whether to use App Check limited use tokens. Defaults to false.
*/
useLimitedUseAppCheckTokens?: boolean;
}

View File

@@ -0,0 +1,33 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GenerateContentRequest, ChromeAdapter, InferenceSource } from '../types';
interface CallResult<Response> {
response: Response;
inferenceSource: InferenceSource;
}
/**
* Dispatches a request to the appropriate backend (on-device or in-cloud)
* based on the inference mode.
*
* @param request - The request to be sent.
* @param chromeAdapter - The on-device model adapter.
* @param onDeviceCall - The function to call for on-device inference.
* @param inCloudCall - The function to call for in-cloud inference.
* @returns The response from the backend.
*/
export declare function callCloudOrDevice<Response>(request: GenerateContentRequest, chromeAdapter: ChromeAdapter | undefined, onDeviceCall: () => Promise<Response>, inCloudCall: () => Promise<Response>): Promise<CallResult<Response>>;
export {};

View File

@@ -0,0 +1,61 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Defines the image format for images generated by Imagen.
*
* Use this class to specify the desired format (JPEG or PNG) and compression quality
* for images generated by Imagen. This is typically included as part of
* {@link ImagenModelParams}.
*
* @example
* ```javascript
* const imagenModelParams = {
* // ... other ImagenModelParams
* imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.
* }
* ```
*
* @public
*/
export declare class ImagenImageFormat {
/**
* The MIME type.
*/
mimeType: string;
/**
* The level of compression (a number between 0 and 100).
*/
compressionQuality?: number;
private constructor();
/**
* Creates an {@link ImagenImageFormat} for a JPEG image.
*
* @param compressionQuality - The level of compression (a number between 0 and 100).
* @returns An {@link ImagenImageFormat} object for a JPEG image.
*
* @public
*/
static jpeg(compressionQuality?: number): ImagenImageFormat;
/**
* Creates an {@link ImagenImageFormat} for a PNG image.
*
* @returns An {@link ImagenImageFormat} object for a PNG image.
*
* @public
*/
static png(): ImagenImageFormat;
}

View File

@@ -0,0 +1,28 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content, GenerateContentRequest, Part } from '../types';
import { ImagenGenerationParams, PredictRequestBody } from '../types/internal';
export declare function formatSystemInstruction(input?: string | Part | Content): Content | undefined;
export declare function formatNewContent(request: string | Array<string | Part>): Content;
export declare function formatGenerateContentInput(params: GenerateContentRequest | string | Array<string | Part>): GenerateContentRequest;
/**
* Convert the user-defined parameters in {@link ImagenGenerationParams} to the format
* that is expected from the REST API.
*
* @internal
*/
export declare function createPredictRequestBody(prompt: string, { gcsURI, imageFormat, addWatermark, numberOfImages, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }: ImagenGenerationParams): PredictRequestBody;

View File

@@ -0,0 +1,69 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SingleRequestOptions } from '../types';
import { ApiSettings } from '../types/internal';
export declare const TIMEOUT_EXPIRED_MESSAGE = "Timeout has expired.";
export declare const ABORT_ERROR_NAME = "AbortError";
export declare const enum Task {
GENERATE_CONTENT = "generateContent",
STREAM_GENERATE_CONTENT = "streamGenerateContent",
COUNT_TOKENS = "countTokens",
PREDICT = "predict"
}
export declare const enum ServerPromptTemplateTask {
TEMPLATE_GENERATE_CONTENT = "templateGenerateContent",
TEMPLATE_STREAM_GENERATE_CONTENT = "templateStreamGenerateContent",
TEMPLATE_PREDICT = "templatePredict"
}
interface BaseRequestURLParams {
apiSettings: ApiSettings;
stream: boolean;
singleRequestOptions?: SingleRequestOptions;
}
/**
* Parameters used to construct the URL of a request to use a model.
*/
interface ModelRequestURLParams extends BaseRequestURLParams {
task: Task;
model: string;
templateId?: never;
}
/**
* Parameters used to construct the URL of a request to use server side prompt templates.
*/
interface TemplateRequestURLParams extends BaseRequestURLParams {
task: ServerPromptTemplateTask;
templateId: string;
model?: never;
}
export declare class RequestURL {
readonly params: ModelRequestURLParams | TemplateRequestURLParams;
constructor(params: ModelRequestURLParams | TemplateRequestURLParams);
toString(): string;
private get pathname();
private get baseUrl();
private get queryParams();
}
export declare class WebSocketUrl {
apiSettings: ApiSettings;
constructor(apiSettings: ApiSettings);
toString(): string;
private get pathname();
}
export declare function getHeaders(url: RequestURL): Promise<Headers>;
export declare function makeRequest(requestUrlParams: TemplateRequestURLParams | ModelRequestURLParams, body: string): Promise<Response>;
export {};

View File

@@ -0,0 +1,57 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { EnhancedGenerateContentResponse, FunctionCall, GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, InlineDataPart, Part, InferenceSource } from '../types';
/**
* Creates an EnhancedGenerateContentResponse object that has helper functions and
* other modifications that improve usability.
*/
export declare function createEnhancedContentResponse(response: GenerateContentResponse, inferenceSource?: InferenceSource): EnhancedGenerateContentResponse;
/**
* Adds convenience helper methods to a response object, including stream
* chunks (as long as each chunk is a complete GenerateContentResponse JSON).
*/
export declare function addHelpers(response: GenerateContentResponse): EnhancedGenerateContentResponse;
/**
* Returns all text from the first candidate's parts, filtering by whether
* `partFilter()` returns true.
*
* @param response - The `GenerateContentResponse` from which to extract text.
* @param partFilter - Only return `Part`s for which this returns true
*/
export declare function getText(response: GenerateContentResponse, partFilter: (part: Part) => boolean): string;
/**
* Returns every {@link FunctionCall} associated with first candidate.
*/
export declare function getFunctionCalls(response?: GenerateContentResponse): FunctionCall[] | undefined;
/**
* Returns every {@link InlineDataPart} in the first candidate if present.
*
* @internal
*/
export declare function getInlineDataParts(response: GenerateContentResponse): InlineDataPart[] | undefined;
export declare function formatBlockErrorMessage(response: GenerateContentResponse): string;
/**
* Convert a generic successful fetch response body to an Imagen response object
* that can be returned to the user. This converts the REST APIs response format to our
* APIs representation of a response.
*
* @internal
*/
export declare function handlePredictResponse<T extends ImagenInlineImage | ImagenGCSImage>(response: Response): Promise<{
images: T[];
filteredReason?: string;
}>;

View File

@@ -0,0 +1,170 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { SchemaInterface, SchemaType, SchemaParams, SchemaRequest } from '../types/schema';
/**
* Parent class encompassing all Schema types, with static methods that
* allow building specific Schema types. This class can be converted with
* `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.
* (This string conversion is automatically done when calling SDK methods.)
* @public
*/
export declare abstract class Schema implements SchemaInterface {
/**
* Optional. The type of the property.
* This can only be undefined when using `anyOf` schemas, which do not have an
* explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.
*/
type?: SchemaType;
/** Optional. The format of the property.
* Supported formats:<br/>
* <ul>
* <li>for NUMBER type: "float", "double"</li>
* <li>for INTEGER type: "int32", "int64"</li>
* <li>for STRING type: "email", "byte", etc</li>
* </ul>
*/
format?: string;
/** Optional. The description of the property. */
description?: string;
/** Optional. The items of the property. */
items?: SchemaInterface;
/** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
minItems?: number;
/** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
maxItems?: number;
/** Optional. Whether the property is nullable. Defaults to false. */
nullable: boolean;
/** Optional. The example of the property. */
example?: unknown;
/**
* Allows user to add other schema properties that have not yet
* been officially added to the SDK.
*/
[key: string]: unknown;
constructor(schemaParams: SchemaInterface);
/**
* Defines how this Schema should be serialized as JSON.
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior
* @internal
*/
toJSON(): SchemaRequest;
static array(arrayParams: SchemaParams & {
items: Schema;
}): ArraySchema;
static object(objectParams: SchemaParams & {
properties: {
[k: string]: Schema;
};
optionalProperties?: string[];
}): ObjectSchema;
static string(stringParams?: SchemaParams): StringSchema;
static enumString(stringParams: SchemaParams & {
enum: string[];
}): StringSchema;
static integer(integerParams?: SchemaParams): IntegerSchema;
static number(numberParams?: SchemaParams): NumberSchema;
static boolean(booleanParams?: SchemaParams): BooleanSchema;
static anyOf(anyOfParams: SchemaParams & {
anyOf: TypedSchema[];
}): AnyOfSchema;
}
/**
* A type that includes all specific Schema types.
* @public
*/
export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema;
/**
* Schema class for "integer" types.
* @public
*/
export declare class IntegerSchema extends Schema {
constructor(schemaParams?: SchemaParams);
}
/**
* Schema class for "number" types.
* @public
*/
export declare class NumberSchema extends Schema {
constructor(schemaParams?: SchemaParams);
}
/**
* Schema class for "boolean" types.
* @public
*/
export declare class BooleanSchema extends Schema {
constructor(schemaParams?: SchemaParams);
}
/**
* Schema class for "string" types. Can be used with or without
* enum values.
* @public
*/
export declare class StringSchema extends Schema {
enum?: string[];
constructor(schemaParams?: SchemaParams, enumValues?: string[]);
/**
* @internal
*/
toJSON(): SchemaRequest;
}
/**
* Schema class for "array" types.
* The `items` param should refer to the type of item that can be a member
* of the array.
* @public
*/
export declare class ArraySchema extends Schema {
items: TypedSchema;
constructor(schemaParams: SchemaParams, items: TypedSchema);
/**
* @internal
*/
toJSON(): SchemaRequest;
}
/**
* Schema class for "object" types.
* The `properties` param must be a map of `Schema` objects.
* @public
*/
export declare class ObjectSchema extends Schema {
properties: {
[k: string]: TypedSchema;
};
optionalProperties: string[];
constructor(schemaParams: SchemaParams, properties: {
[k: string]: TypedSchema;
}, optionalProperties?: string[]);
/**
* @internal
*/
toJSON(): SchemaRequest;
}
/**
* Schema class representing a value that can conform to any of the provided sub-schemas. This is
* useful when a field can accept multiple distinct types or structures.
* @public
*/
export declare class AnyOfSchema extends Schema {
anyOf: TypedSchema[];
constructor(schemaParams: SchemaParams & {
anyOf: TypedSchema[];
});
/**
* @internal
*/
toJSON(): SchemaRequest;
}

View File

@@ -0,0 +1,39 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GenerateContentResponse, GenerateContentStreamResult } from '../types';
import { ApiSettings } from '../types/internal';
import { InferenceSource } from '../public-types';
/**
* Process a response.body stream from the backend and return an
* iterator that provides one complete GenerateContentResponse at a time
* and a promise that resolves with a single aggregated
* GenerateContentResponse.
*
* @param response - Response from a fetch call
*/
export declare function processStream(response: Response, apiSettings: ApiSettings, inferenceSource?: InferenceSource): Promise<GenerateContentStreamResult & {
firstValue?: GenerateContentResponse;
}>;
/**
* Reads a raw string stream, buffers incomplete chunks, and yields parsed JSON objects.
*/
export declare function getResponseStream<T>(inputStream: ReadableStream<string>): ReadableStream<T>;
/**
* Aggregates an array of `GenerateContentResponse`s into a single
* GenerateContentResponse.
*/
export declare function aggregateResponses(responses: GenerateContentResponse[]): GenerateContentResponse;

35
node_modules/@firebase/ai/dist/esm/src/service.d.ts generated vendored Normal file
View File

@@ -0,0 +1,35 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseApp, _FirebaseService } from '@firebase/app';
import { AI, AIOptions, ChromeAdapter, InferenceMode, OnDeviceParams } from './public-types';
import { AppCheckInternalComponentName, FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
import { Provider } from '@firebase/component';
import { FirebaseAuthInternal, FirebaseAuthInternalName } from '@firebase/auth-interop-types';
import { Backend } from './backend';
export declare class AIService implements AI, _FirebaseService {
app: FirebaseApp;
backend: Backend;
chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapter | undefined) | undefined;
auth: FirebaseAuthInternal | null;
appCheck: FirebaseAppCheckInternal | null;
_options?: Omit<AIOptions, 'backend'>;
location: string;
constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapter | undefined) | undefined);
_delete(): Promise<void>;
set options(optionsToSet: AIOptions);
get options(): AIOptions | undefined;
}

View File

@@ -0,0 +1,61 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { InferenceMode } from './enums';
import { CountTokensRequest, GenerateContentRequest } from './requests';
/**
* Defines an inference "backend" that uses Chrome's on-device model,
* and encapsulates logic for detecting when on-device inference is
* possible.
*
* These methods should not be called directly by the user.
*
* @beta
*/
export interface ChromeAdapter {
/**
* @internal
*/
mode: InferenceMode;
/**
* Checks if the on-device model is capable of handling a given
* request.
* @param request - A potential request to be passed to the model.
*/
isAvailable(request: GenerateContentRequest): Promise<boolean>;
/**
* Generates content using on-device inference.
*
* @remarks
* This is comparable to {@link GenerativeModel.generateContent} for generating
* content using in-cloud inference.
* @param request - a standard Firebase AI {@link GenerateContentRequest}
*/
generateContent(request: GenerateContentRequest): Promise<Response>;
/**
* Generates a content stream using on-device inference.
*
* @remarks
* This is comparable to {@link GenerativeModel.generateContentStream} for generating
* a content stream using in-cloud inference.
* @param request - a standard Firebase AI {@link GenerateContentRequest}
*/
generateContentStream(request: GenerateContentRequest): Promise<Response>;
/**
* @internal
*/
countTokens(request: CountTokensRequest): Promise<Response>;
}

View File

@@ -0,0 +1,266 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Language, Outcome, Role } from './enums';
/**
* Content type for both prompts and response candidates.
* @public
*/
export interface Content {
role: Role;
parts: Part[];
}
/**
* Content part - includes text, image/video, or function call/response
* part types.
* @public
*/
export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart;
/**
* Content part interface if the part represents a text string.
* @public
*/
export interface TextPart {
text: string;
inlineData?: never;
functionCall?: never;
functionResponse?: never;
thought?: boolean;
/**
* @internal
*/
thoughtSignature?: string;
executableCode?: never;
codeExecutionResult?: never;
}
/**
* Content part interface if the part represents an image.
* @public
*/
export interface InlineDataPart {
text?: never;
inlineData: GenerativeContentBlob;
functionCall?: never;
functionResponse?: never;
/**
* Applicable if `inlineData` is a video.
*/
videoMetadata?: VideoMetadata;
thought?: boolean;
/**
* @internal
*/
thoughtSignature?: never;
executableCode?: never;
codeExecutionResult?: never;
}
/**
* Describes the input video content.
* @public
*/
export interface VideoMetadata {
/**
* The start offset of the video in
* protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
*/
startOffset: string;
/**
* The end offset of the video in
* protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
*/
endOffset: string;
}
/**
* Content part interface if the part represents a {@link FunctionCall}.
* @public
*/
export interface FunctionCallPart {
text?: never;
inlineData?: never;
functionCall: FunctionCall;
functionResponse?: never;
thought?: boolean;
/**
* @internal
*/
thoughtSignature?: never;
executableCode?: never;
codeExecutionResult?: never;
}
/**
* Content part interface if the part represents {@link FunctionResponse}.
* @public
*/
export interface FunctionResponsePart {
text?: never;
inlineData?: never;
functionCall?: never;
functionResponse: FunctionResponse;
thought?: boolean;
/**
* @internal
*/
thoughtSignature?: never;
executableCode?: never;
codeExecutionResult?: never;
}
/**
* Content part interface if the part represents {@link FileData}
* @public
*/
export interface FileDataPart {
text?: never;
inlineData?: never;
functionCall?: never;
functionResponse?: never;
fileData: FileData;
thought?: boolean;
/**
* @internal
*/
thoughtSignature?: never;
executableCode?: never;
codeExecutionResult?: never;
}
/**
* Represents the code that is executed by the model.
*
* @public
*/
export interface ExecutableCodePart {
text?: never;
inlineData?: never;
functionCall?: never;
functionResponse?: never;
fileData: never;
thought?: never;
/**
* @internal
*/
thoughtSignature?: never;
executableCode?: ExecutableCode;
codeExecutionResult?: never;
}
/**
* Represents the code execution result from the model.
*
* @public
*/
export interface CodeExecutionResultPart {
text?: never;
inlineData?: never;
functionCall?: never;
functionResponse?: never;
fileData: never;
thought?: never;
/**
* @internal
*/
thoughtSignature?: never;
executableCode?: never;
codeExecutionResult?: CodeExecutionResult;
}
/**
* An interface for executable code returned by the model.
*
* @public
*/
export interface ExecutableCode {
/**
* The programming language of the code.
*/
language?: Language;
/**
* The source code to be executed.
*/
code?: string;
}
/**
* The results of code execution run by the model.
*
* @public
*/
export interface CodeExecutionResult {
/**
* The result of the code execution.
*/
outcome?: Outcome;
/**
* The output from the code execution, or an error message
* if it failed.
*/
output?: string;
}
/**
* A predicted {@link FunctionCall} returned from the model
* that contains a string representing the {@link FunctionDeclaration.name}
* and a structured JSON object containing the parameters and their values.
* @public
*/
export interface FunctionCall {
/**
* The id of the function call. This must be sent back in the associated {@link FunctionResponse}.
*
*
* @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
* When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
* `undefined`.
*/
id?: string;
name: string;
args: object;
}
/**
* The result output from a {@link FunctionCall} that contains a string
* representing the {@link FunctionDeclaration.name}
* and a structured JSON object containing any output
* from the function is used as context to the model.
* This should contain the result of a {@link FunctionCall}
* made based on model prediction.
* @public
*/
export interface FunctionResponse {
/**
* The id of the {@link FunctionCall}.
*
* @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
* When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
* `undefined`.
*/
id?: string;
name: string;
response: object;
parts?: Part[];
}
/**
* Interface for sending an image.
* @public
*/
export interface GenerativeContentBlob {
mimeType: string;
/**
* Image as a base64 string.
*/
data: string;
}
/**
* Data pointing to a file uploaded on Google Cloud Storage.
* @public
*/
export interface FileData {
mimeType: string;
fileUri: string;
}

419
node_modules/@firebase/ai/dist/esm/src/types/enums.d.ts generated vendored Normal file
View File

@@ -0,0 +1,419 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Role is the producer of the content.
* @public
*/
export type Role = (typeof POSSIBLE_ROLES)[number];
/**
* Possible roles.
* @public
*/
export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"];
/**
* Harm categories that would cause prompts or candidates to be blocked.
* @public
*/
export declare const HarmCategory: {
readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT";
};
/**
* Harm categories that would cause prompts or candidates to be blocked.
* @public
*/
export type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];
/**
* Threshold above which a prompt or candidate will be blocked.
* @public
*/
export declare const HarmBlockThreshold: {
/**
* Content with `NEGLIGIBLE` will be allowed.
*/
readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
/**
* Content with `NEGLIGIBLE` and `LOW` will be allowed.
*/
readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
/**
* Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
*/
readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
/**
* All content will be allowed.
*/
readonly BLOCK_NONE: "BLOCK_NONE";
/**
* All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
* to the {@link (HarmCategory:type)} will not be present in the response.
*/
readonly OFF: "OFF";
};
/**
* Threshold above which a prompt or candidate will be blocked.
* @public
*/
export type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];
/**
* This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
*
* @public
*/
export declare const HarmBlockMethod: {
/**
* The harm block method uses both probability and severity scores.
*/
readonly SEVERITY: "SEVERITY";
/**
* The harm block method uses the probability score.
*/
readonly PROBABILITY: "PROBABILITY";
};
/**
* This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
*
* @public
*/
export type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];
/**
* Probability that a prompt or candidate matches a harm category.
* @public
*/
export declare const HarmProbability: {
    /**
     * Content has a negligible chance of being unsafe.
     */
    readonly NEGLIGIBLE: "NEGLIGIBLE";
    /**
     * Content has a low chance of being unsafe.
     */
    readonly LOW: "LOW";
    /**
     * Content has a medium chance of being unsafe.
     */
    readonly MEDIUM: "MEDIUM";
    /**
     * Content has a high chance of being unsafe.
     */
    readonly HIGH: "HIGH";
};
/**
 * Probability that a prompt or candidate matches a harm category.
 *
 * (Union of the string values declared on the `HarmProbability` const above.)
 * @public
 */
export type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability];
/**
 * Harm severity levels.
 * @public
 */
export declare const HarmSeverity: {
    /**
     * Negligible level of harm severity.
     */
    readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE";
    /**
     * Low level of harm severity.
     */
    readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW";
    /**
     * Medium level of harm severity.
     */
    readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM";
    /**
     * High level of harm severity.
     */
    readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH";
    /**
     * Harm severity is not supported.
     *
     * @remarks
     * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.
     */
    readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED";
};
/**
 * Harm severity levels.
 *
 * (Union of the string values declared on the `HarmSeverity` const above.)
 * @public
 */
export type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];
/**
* Reason that a prompt was blocked.
* @public
*/
export declare const BlockReason: {
    /**
     * Content was blocked by safety settings.
     */
    readonly SAFETY: "SAFETY";
    /**
     * Content was blocked, but the reason is uncategorized.
     */
    readonly OTHER: "OTHER";
    /**
     * Content was blocked because it contained terms from the terminology blocklist.
     */
    readonly BLOCKLIST: "BLOCKLIST";
    /**
     * Content was blocked due to prohibited content.
     */
    readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
};
/**
 * Reason that a prompt was blocked.
 *
 * (Union of the string values declared on the `BlockReason` const above.)
 * @public
 */
export type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];
/**
 * Reason that a candidate finished.
 * @public
 */
export declare const FinishReason: {
    /**
     * Natural stop point of the model or provided stop sequence.
     */
    readonly STOP: "STOP";
    /**
     * The maximum number of tokens as specified in the request was reached.
     */
    readonly MAX_TOKENS: "MAX_TOKENS";
    /**
     * The candidate content was flagged for safety reasons.
     */
    readonly SAFETY: "SAFETY";
    /**
     * The candidate content was flagged for recitation reasons.
     */
    readonly RECITATION: "RECITATION";
    /**
     * Unknown reason.
     */
    readonly OTHER: "OTHER";
    /**
     * The candidate content contained forbidden terms.
     */
    readonly BLOCKLIST: "BLOCKLIST";
    /**
     * The candidate content potentially contained prohibited content.
     */
    readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
    /**
     * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
     */
    readonly SPII: "SPII";
    /**
     * The function call generated by the model was invalid.
     */
    readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL";
};
/**
 * Reason that a candidate finished.
 *
 * (Union of the string values declared on the `FinishReason` const above.)
 * @public
 */
export type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
/**
* @public
*/
export declare const FunctionCallingMode: {
    /**
     * Default model behavior; model decides to predict either a function call
     * or a natural language response.
     */
    readonly AUTO: "AUTO";
    /**
     * Model is constrained to always predicting a function call only.
     * If `allowed_function_names` is set, the predicted function call will be
     * limited to any one of `allowed_function_names`, else the predicted
     * function call will be any one of the provided `function_declarations`.
     */
    readonly ANY: "ANY";
    /**
     * Model will not predict any function call. Model behavior is same as when
     * not passing any function declarations.
     */
    readonly NONE: "NONE";
};
/**
 * Controls whether the model may respond with a function call.
 *
 * (Union of the string values declared on the `FunctionCallingMode` const above.)
 * @public
 */
export type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];
/**
 * Content part modality.
 * @public
 */
export declare const Modality: {
    /**
     * Unspecified modality.
     */
    readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED";
    /**
     * Plain text.
     */
    readonly TEXT: "TEXT";
    /**
     * Image.
     */
    readonly IMAGE: "IMAGE";
    /**
     * Video.
     */
    readonly VIDEO: "VIDEO";
    /**
     * Audio.
     */
    readonly AUDIO: "AUDIO";
    /**
     * Document (for example, PDF).
     */
    readonly DOCUMENT: "DOCUMENT";
};
/**
 * Content part modality.
 *
 * (Union of the string values declared on the `Modality` const above.)
 * @public
 */
export type Modality = (typeof Modality)[keyof typeof Modality];
/**
* Generation modalities to be returned in generation responses.
*
* @beta
*/
export declare const ResponseModality: {
    /**
     * Text.
     * @beta
     */
    readonly TEXT: "TEXT";
    /**
     * Image.
     * @beta
     */
    readonly IMAGE: "IMAGE";
    /**
     * Audio.
     * @beta
     */
    readonly AUDIO: "AUDIO";
};
/**
 * Generation modalities to be returned in generation responses.
 *
 * (Union of the string values declared on the `ResponseModality` const above.)
 *
 * @beta
 */
export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];
/**
 * Determines whether inference happens on-device or in-cloud.
 *
 * @remarks
 * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
 * on-device model. If on-device inference is not available, the SDK
 * will fall back to using a cloud-hosted model.
 * <br/>
 * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
 * on-device model. The SDK will not fall back to a cloud-hosted model.
 * If on-device inference is not available, inference methods will throw.
 * <br/>
 * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
 * cloud-hosted model. The SDK will not fall back to an on-device model.
 * <br/>
 * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
 * cloud-hosted model. If not available, the SDK will fall back to an
 * on-device model.
 *
 * @beta
 */
export declare const InferenceMode: {
    /** Attempt on-device inference; fall back to a cloud-hosted model if unavailable. */
    readonly PREFER_ON_DEVICE: "prefer_on_device";
    /** Only use on-device inference; never fall back to the cloud (throws if unavailable). */
    readonly ONLY_ON_DEVICE: "only_on_device";
    /** Only use a cloud-hosted model; never fall back to on-device. */
    readonly ONLY_IN_CLOUD: "only_in_cloud";
    /** Attempt in-cloud inference; fall back to an on-device model if unavailable. */
    readonly PREFER_IN_CLOUD: "prefer_in_cloud";
};
/**
 * Determines whether inference happens on-device or in-cloud.
 *
 * @beta
 */
export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
/**
 * Indicates whether inference happened on-device or in-cloud.
 *
 * @beta
 */
export declare const InferenceSource: {
    /** Inference was performed by an on-device model. */
    readonly ON_DEVICE: "on_device";
    /** Inference was performed by a cloud-hosted model. */
    readonly IN_CLOUD: "in_cloud";
};
/**
 * Indicates whether inference happened on-device or in-cloud.
 *
 * @beta
 */
export type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource];
/**
* Represents the result of the code execution.
*
* @public
*/
export declare const Outcome: {
    /** Unspecified status; the outcome of the execution is unknown. */
    readonly UNSPECIFIED: "OUTCOME_UNSPECIFIED";
    /** The code executed successfully. */
    readonly OK: "OUTCOME_OK";
    /** The code execution failed. */
    readonly FAILED: "OUTCOME_FAILED";
    /** The code execution exceeded its deadline before completing. */
    readonly DEADLINE_EXCEEDED: "OUTCOME_DEADLINE_EXCEEDED";
};
/**
 * Represents the result of the code execution.
 *
 * Members are declared as `readonly` string literals (matching the pattern of
 * every other enum-like const in this file); previously they were typed as
 * plain `string`, which collapsed this derived union to `string` and defeated
 * narrowing and exhaustiveness checks.
 *
 * @public
 */
export type Outcome = (typeof Outcome)[keyof typeof Outcome];
/**
* The programming language of the code.
*
* @public
*/
export declare const Language: {
    /** Unspecified or unrecognized language. */
    readonly UNSPECIFIED: "LANGUAGE_UNSPECIFIED";
    /** Python. */
    readonly PYTHON: "PYTHON";
};
/**
 * The programming language of the code.
 *
 * Members are declared as `readonly` string literals (matching the pattern of
 * every other enum-like const in this file); previously they were typed as
 * plain `string`, which collapsed this derived union to `string` and defeated
 * narrowing.
 *
 * @public
 */
export type Language = (typeof Language)[keyof typeof Language];
/**
* A preset that controls the model's "thinking" process. Use
* `ThinkingLevel.LOW` for faster responses on less complex tasks, and
* `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
*
* @public
*/
export declare const ThinkingLevel: {
    // NOTE(review): unlike the other enum-like consts in this file, these
    // members are typed as plain mutable `string` rather than `readonly`
    // string literals, so the companion `ThinkingLevel` type collapses to
    // `string` and provides no narrowing. Confirm the runtime literal values
    // against the upstream SDK before tightening these types.
    MINIMAL: string;
    LOW: string;
    MEDIUM: string;
    HIGH: string;
};
/**
 * A preset that controls the model's "thinking" process. Use
 * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
 * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
 *
 * @public
 */
export type ThinkingLevel = (typeof ThinkingLevel)[keyof typeof ThinkingLevel];

View File

@@ -0,0 +1,89 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GenerateContentResponse } from './responses';
/**
* Details object that may be included in an error response.
*
* @public
*/
export interface ErrorDetails {
    /** Type identifier for the error detail (presumably a google.rpc type URL — confirm against server responses). */
    '@type'?: string;
    /** The reason for the error. */
    reason?: string;
    /** The domain where the error occurred. */
    domain?: string;
    /** Additional metadata about the error. */
    metadata?: Record<string, unknown>;
    /** Any other relevant information about the error. */
    [key: string]: unknown;
}
/**
 * Details object that contains data originating from a bad HTTP response.
 *
 * @public
 */
export interface CustomErrorData {
    /** HTTP status code of the error response. */
    status?: number;
    /** HTTP status text of the error response. */
    statusText?: string;
    /** Response from a {@link GenerateContentRequest}. */
    response?: GenerateContentResponse;
    /** Optional additional details about the error. */
    errorDetails?: ErrorDetails[];
}
/**
* Standardized error codes that {@link AIError} can have.
*
* @public
*/
export declare const AIErrorCode: {
    /** A generic error occurred. */
    readonly ERROR: "error";
    /** An error occurred in a request. */
    readonly REQUEST_ERROR: "request-error";
    /** An error occurred in a response. */
    readonly RESPONSE_ERROR: "response-error";
    /** An error occurred while performing a fetch. */
    readonly FETCH_ERROR: "fetch-error";
    /** An error occurred because an operation was attempted on a closed session. */
    readonly SESSION_CLOSED: "session-closed";
    /** An error associated with a Content object. */
    readonly INVALID_CONTENT: "invalid-content";
    /** An error due to the Firebase API not being enabled in the Console. */
    readonly API_NOT_ENABLED: "api-not-enabled";
    /** An error due to invalid Schema input. */
    readonly INVALID_SCHEMA: "invalid-schema";
    /** An error occurred due to a missing Firebase API key. */
    readonly NO_API_KEY: "no-api-key";
    /** An error occurred due to a missing Firebase app ID. */
    readonly NO_APP_ID: "no-app-id";
    /** An error occurred due to a model name not being specified during initialization. */
    readonly NO_MODEL: "no-model";
    /** An error occurred due to a missing project ID. */
    readonly NO_PROJECT_ID: "no-project-id";
    /** An error occurred while parsing. */
    readonly PARSE_FAILED: "parse-failed";
    /** An error occurred due to an attempt to use an unsupported feature. */
    readonly UNSUPPORTED: "unsupported";
};
/**
 * Standardized error codes that {@link AIError} can have.
 *
 * (Union of the string values declared on the `AIErrorCode` const above.)
 *
 * @public
 */
export type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];

View File

@@ -0,0 +1,57 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Tool, GenerationConfig, Citation, FinishReason, GroundingMetadata, PromptFeedback, SafetyRating, UsageMetadata, URLContextMetadata } from '../public-types';
import { Content, Part } from './content';
/**
 * Request body for a `countTokens` call to the Gemini Developer API; it wraps
 * a full `generateContent` request.
 *
 * @internal
 */
export interface GoogleAICountTokensRequest {
    generateContentRequest: {
        model: string;
        contents: Content[];
        systemInstruction?: string | Part | Content;
        tools?: Tool[];
        generationConfig?: GenerationConfig;
    };
}
/**
 * Shape of a `generateContent` response as returned by the Gemini Developer API.
 *
 * @internal
 */
export interface GoogleAIGenerateContentResponse {
    candidates?: GoogleAIGenerateContentCandidate[];
    promptFeedback?: PromptFeedback;
    usageMetadata?: UsageMetadata;
}
/**
 * A single response candidate in the Gemini Developer API response shape.
 *
 * @internal
 */
export interface GoogleAIGenerateContentCandidate {
    index: number;
    content: Content;
    finishReason?: FinishReason;
    finishMessage?: string;
    safetyRatings?: SafetyRating[];
    citationMetadata?: GoogleAICitationMetadata;
    groundingMetadata?: GroundingMetadata;
    urlContextMetadata?: URLContextMetadata;
}
/**
 * Citation metadata in the Gemini Developer API shape; note the field is
 * named `citationSources` here.
 *
 * @internal
 */
export interface GoogleAICitationMetadata {
    citationSources: Citation[];
}

View File

@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export * from './requests';
export * from './responses';

View File

@@ -0,0 +1,134 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ImagenGenerationConfig, ImagenSafetySettings } from './requests';
/**
* A response from the REST API is expected to look like this in the success case:
* {
* "predictions": [
* {
* "mimeType": "image/png",
* "bytesBase64Encoded": "iVBORw0KG..."
* },
* {
* "mimeType": "image/png",
* "bytesBase64Encoded": "i4BOtw0KG..."
* }
* ]
* }
*
* And like this in the failure case:
* {
* "predictions": [
* {
* "raiFilteredReason": "..."
* }
* ]
* }
*
* @internal
*/
export interface ImagenResponseInternal {
    /**
     * One entry per requested image. Each entry carries either image data
     * (`mimeType` plus `bytesBase64Encoded` or `gcsUri`) or, when the image
     * was filtered, a `raiFilteredReason` — see the sample payloads above.
     */
    predictions?: Array<{
        /**
         * The MIME type of the generated image.
         */
        mimeType?: string;
        /**
         * The image data encoded as a base64 string.
         */
        bytesBase64Encoded?: string;
        /**
         * The GCS URI where the image was stored.
         */
        gcsUri?: string;
        /**
         * The reason why the image was filtered.
         */
        raiFilteredReason?: string;
        /**
         * The safety attributes.
         *
         * This type is currently unused in the SDK. It is sent back because our requests set
         * `includeSafetyAttributes`. This property is currently only used to avoid throwing an error
         * when encountering this unsupported prediction type.
         */
        safetyAttributes?: unknown;
    }>;
}
/**
* The parameters to be sent in the request body of the HTTP call
* to the Vertex AI backend.
*
* We need a separate internal-only interface for this because the REST
* API expects different parameter names than what we show to our users.
*
* Sample request body JSON:
* {
* "instances": [
* {
* "prompt": "Portrait of a golden retriever on a beach."
* }
* ],
* "parameters": {
* "mimeType": "image/png",
* "safetyFilterLevel": "block_low_and_above",
* "personGeneration": "allow_all",
* "sampleCount": 2,
* "includeRaiReason": true,
* "includeSafetyAttributes": true,
* "aspectRatio": "9:16"
* }
* }
*
* See the Google Cloud docs: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api#-drest
*
* @internal
*/
export interface PredictRequestBody {
    /** Exactly one instance (one text prompt) per request. */
    instances: [
        {
            /** The text prompt to generate images from. */
            prompt: string;
        }
    ];
    /** REST parameter names differ from the public SDK option names (see the sample above). */
    parameters: {
        /** Number of images to generate (the public `numberOfImages` option). */
        sampleCount: number;
        aspectRatio?: string;
        outputOptions?: {
            mimeType: string;
            compressionQuality?: number;
        };
        negativePrompt?: string;
        /** GCS destination URI (presumably the public `gcsURI` option — confirm at call sites). */
        storageUri?: string;
        addWatermark?: boolean;
        safetyFilterLevel?: string;
        personGeneration?: string;
        includeRaiReason: boolean;
        includeSafetyAttributes: boolean;
    };
}
/**
 * Contains all possible REST API parameters that are provided by the caller.
 *
 * @internal
 */
export type ImagenGenerationParams = {
    /**
     * The Cloud Storage for Firebase bucket URI where the images should be stored
     * (for GCS requests only).
     */
    gcsURI?: string;
} & ImagenGenerationConfig & ImagenSafetySettings;

View File

@@ -0,0 +1,245 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ImagenImageFormat } from '../../requests/imagen-image-format';
/**
* Parameters for configuring an {@link ImagenModel}.
*
* @public
*/
export interface ImagenModelParams {
    /**
     * The Imagen model to use for generating images.
     * For example: `imagen-3.0-generate-002`.
     *
     * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
     *
     * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}
     * for a full list of supported Imagen 3 models.
     */
    model: string;
    /**
     * Configuration options for generating images with Imagen.
     */
    generationConfig?: ImagenGenerationConfig;
    /**
     * Safety settings for filtering potentially inappropriate content.
     */
    safetySettings?: ImagenSafetySettings;
}
/**
 * Configuration options for generating images with Imagen.
 *
 * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for
 * more details.
 *
 * @public
 */
export interface ImagenGenerationConfig {
    /**
     * A description of what should be omitted from the generated images.
     *
     * Support for negative prompts depends on the Imagen model.
     *
     * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.
     *
     * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) for model
     * versions later than `imagen-3.0-generate-002`.
     */
    negativePrompt?: string;
    /**
     * The number of images to generate. The default value is 1.
     *
     * The number of sample images that may be generated in each request depends on the model
     * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a>
     * documentation for more details.
     */
    numberOfImages?: number;
    /**
     * The aspect ratio of the generated images. The default value is square 1:1.
     * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}
     * for more details.
     */
    aspectRatio?: ImagenAspectRatio;
    /**
     * The image format of the generated images. The default is PNG.
     *
     * See {@link ImagenImageFormat} for more details.
     */
    imageFormat?: ImagenImageFormat;
    /**
     * Whether to add an invisible watermark to generated images.
     *
     * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate
     * that they are AI generated. If set to `false`, watermarking will be disabled.
     *
     * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a>
     * documentation for more details.
     *
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,
     * and cannot be turned off.
     */
    addWatermark?: boolean;
}
/**
* A filter level controlling how aggressively to filter sensitive content.
*
* Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
* are assessed against a list of safety filters, which include 'harmful categories' (for example,
* `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
* filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
* and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
* for more details.
*
* @public
*/
export declare const ImagenSafetyFilterLevel: {
    /**
     * The most aggressive filtering level; most strict blocking.
     */
    readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above";
    /**
     * Blocks some sensitive prompts and responses.
     */
    readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above";
    /**
     * Blocks few sensitive prompts and responses.
     */
    readonly BLOCK_ONLY_HIGH: "block_only_high";
    /**
     * The least aggressive filtering level; blocks very few sensitive prompts and responses.
     *
     * Access to this feature is restricted and may require your case to be reviewed and approved by
     * Cloud support.
     */
    readonly BLOCK_NONE: "block_none";
};
/**
 * A filter level controlling how aggressively to filter sensitive content.
 *
 * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
 * are assessed against a list of safety filters, which include 'harmful categories' (for example,
 * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
 * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
 * for more details.
 *
 * @public
 */
export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];
/**
 * A filter level controlling whether generation of images containing people or faces is allowed.
 *
 * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
 * documentation for more details.
 *
 * @public
 */
export declare const ImagenPersonFilterLevel: {
    /**
     * Disallow generation of images containing people or faces; images of people are filtered out.
     */
    readonly BLOCK_ALL: "dont_allow";
    /**
     * Allow generation of images containing adults only; images of children are filtered out.
     *
     * Generation of images containing people or faces may require your use case to be
     * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
     * for more details.
     */
    readonly ALLOW_ADULT: "allow_adult";
    /**
     * Allow generation of images containing people of all ages.
     *
     * Generation of images containing people or faces may require your use case to be
     * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
     * for more details.
     */
    readonly ALLOW_ALL: "allow_all";
};
/**
 * A filter level controlling whether generation of images containing people or faces is allowed.
 *
 * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
 * documentation for more details.
 *
 * @public
 */
export type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];
/**
* Settings for controlling the aggressiveness of filtering out sensitive content.
*
* See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
* for more details.
*
* @public
*/
export interface ImagenSafetySettings {
    /**
     * A filter level controlling how aggressively to filter out sensitive content from generated
     * images.
     */
    safetyFilterLevel?: ImagenSafetyFilterLevel;
    /**
     * A filter level controlling whether generation of images containing people or faces is allowed.
     */
    personFilterLevel?: ImagenPersonFilterLevel;
}
/**
* Aspect ratios for Imagen images.
*
* To specify an aspect ratio for generated images, set the `aspectRatio` property in your
* {@link ImagenGenerationConfig}.
*
* See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
* for more details and examples of the supported aspect ratios.
*
* @public
*/
export declare const ImagenAspectRatio: {
    /**
     * Square (1:1) aspect ratio.
     */
    readonly SQUARE: "1:1";
    /**
     * Landscape (3:4) aspect ratio.
     *
     * NOTE(review): a 3:4 (width:height) ratio is conventionally portrait; the
     * `LANDSCAPE_3x4` / `PORTRAIT_4x3` names appear swapped relative to their
     * values. The names are public API and cannot be changed here — confirm
     * against the upstream SDK before relying on the orientation implied by
     * the name.
     */
    readonly LANDSCAPE_3x4: "3:4";
    /**
     * Portrait (4:3) aspect ratio.
     *
     * NOTE(review): see the remark on `LANDSCAPE_3x4` — a 4:3 (width:height)
     * ratio is conventionally landscape.
     */
    readonly PORTRAIT_4x3: "4:3";
    /**
     * Landscape (16:9) aspect ratio.
     */
    readonly LANDSCAPE_16x9: "16:9";
    /**
     * Portrait (9:16) aspect ratio.
     */
    readonly PORTRAIT_9x16: "9:16";
};
/**
 * Aspect ratios for Imagen images.
 *
 * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
 * {@link ImagenGenerationConfig}.
 *
 * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * for more details and examples of the supported aspect ratios.
 *
 * @public
 */
export type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];

View File

@@ -0,0 +1,79 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An image generated by Imagen, represented as inline data.
*
* @public
*/
export interface ImagenInlineImage {
    /**
     * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
     *
     * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
     */
    mimeType: string;
    /**
     * The base64-encoded image data (raw base64, without a `data:` URL prefix).
     */
    bytesBase64Encoded: string;
}
/**
 * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
 *
 * This feature is not available yet.
 * @public
 */
export interface ImagenGCSImage {
    /**
     * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
     *
     * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
     */
    mimeType: string;
    /**
     * The URI of the file stored in a Cloud Storage for Firebase bucket.
     *
     * @example `"gs://bucket-name/path/sample_0.jpg"`.
     */
    gcsURI: string;
}
/**
 * The response from a request to generate images with Imagen.
 *
 * @public
 */
export interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> {
    /**
     * The images generated by Imagen.
     *
     * The number of images generated may be fewer than the number requested if one or more were
     * filtered out; see `filteredReason`.
     */
    images: T[];
    /**
     * The reason that images were filtered out. This property will only be defined if one
     * or more images were filtered.
     *
     * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)},
     * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model.
     * The filter levels may be adjusted in your {@link ImagenSafetySettings}.
     *
     * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen}
     * for more details.
     */
    filteredReason?: string;
}

View File

@@ -0,0 +1,26 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export * from './content';
export * from './enums';
export * from './requests';
export * from './responses';
export * from './error';
export * from './schema';
export * from './imagen';
export * from './googleai';
export { LanguageModelCreateOptions, LanguageModelCreateCoreOptions, LanguageModelExpected, LanguageModelMessage, LanguageModelMessageContent, LanguageModelMessageContentValue, LanguageModelMessageRole, LanguageModelMessageType, LanguageModelPromptOptions } from './language-model';
export * from './chrome-adapter';

View File

@@ -0,0 +1,35 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
import { Backend } from '../backend';
import { InferenceMode } from './enums';
export * from './imagen/internal';
export interface ApiSettings {
    /** Firebase API key for the project. */
    apiKey: string;
    /** Firebase project ID. */
    project: string;
    /** Firebase app ID. */
    appId: string;
    /** Whether automatic data collection is enabled for the app. */
    automaticDataCollectionEnabled?: boolean;
    /**
     * @deprecated Use `backend.location` instead.
     */
    location: string;
    /** The backend (e.g. Vertex AI or Gemini Developer API) that requests are routed to. */
    backend: Backend;
    /** Returns a Firebase Auth token, if available. */
    getAuthToken?: () => Promise<FirebaseAuthTokenData | null>;
    /** Returns an App Check token, if available. */
    getAppCheckToken?: () => Promise<AppCheckTokenResult>;
    /** Preferred on-device vs. in-cloud inference mode for hybrid inference. */
    inferenceMode?: InferenceMode;
}

View File

@@ -0,0 +1,107 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The subset of the Prompt API
* (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl }
* required for hybrid functionality.
*
* @internal
*/
export interface LanguageModel extends EventTarget {
    /** Creates a new model session, optionally configured with {@link LanguageModelCreateOptions}. */
    create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
    /** Reports whether an on-device model is available (or downloadable) for the given options. */
    availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
    /** Prompts the model and resolves with the complete response text. */
    prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
    /** Prompts the model and streams the response as it is produced. */
    promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
    /** Measures how much input quota the given prompt would consume. */
    measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
    /** Destroys the session and releases its resources. */
    destroy(): undefined;
}
/**
* @internal
*/
export declare enum Availability {
    /** The on-device model is not available in this environment. */
    'UNAVAILABLE' = "unavailable",
    /** The model is not present yet but can be downloaded. */
    'DOWNLOADABLE' = "downloadable",
    /** The model download is currently in progress. */
    'DOWNLOADING' = "downloading",
    /** The model is downloaded and ready to use. */
    'AVAILABLE' = "available"
}
/**
* Configures the creation of an on-device language model session.
* @beta
*/
export interface LanguageModelCreateCoreOptions {
    /** Top-k sampling parameter for the session. */
    topK?: number;
    /** Sampling temperature for the session. */
    temperature?: number;
    /** Input types (and languages) the session is expected to receive. */
    expectedInputs?: LanguageModelExpected[];
}
/**
 * Configures the creation of an on-device language model session.
 * @beta
 */
export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
    /** Signal used to abort session creation. */
    signal?: AbortSignal;
    /** Messages used to seed the session's context. */
    initialPrompts?: LanguageModelMessage[];
}
/**
 * Options for an on-device language model prompt.
 * @beta
 */
export interface LanguageModelPromptOptions {
    /**
     * Constraint applied to the model's response (e.g. a JSON schema object);
     * see the Prompt API documentation for the accepted shapes.
     */
    responseConstraint?: object;
}
/**
 * Options for the expected inputs for an on-device language model.
 * @beta
 */
export interface LanguageModelExpected {
    /** The expected input type. */
    type: LanguageModelMessageType;
    /** Language tags (presumably BCP-47 — confirm against the Prompt API spec) expected for this input type. */
    languages?: string[];
}
/**
* An on-device language model prompt.
* @beta
*/
export type LanguageModelPrompt = LanguageModelMessage[];
/**
* An on-device language model message.
* @beta
*/
export interface LanguageModelMessage {
role: LanguageModelMessageRole;
content: LanguageModelMessageContent[];
}
/**
* An on-device language model content object.
* @beta
*/
export interface LanguageModelMessageContent {
type: LanguageModelMessageType;
value: LanguageModelMessageContentValue;
}
/**
* Allowable roles for on-device language model usage.
* @beta
*/
export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
/**
* Allowable types for on-device language model messages.
* @beta
*/
export type LanguageModelMessageType = 'text' | 'image' | 'audio';
/**
* Content formats that can be provided as on-device message content.
* @beta
*/
export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;

View File

@@ -0,0 +1,79 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content, FunctionResponse, GenerativeContentBlob, Part } from './content';
import { AudioTranscriptionConfig, LiveGenerationConfig, Tool, ToolConfig } from './requests';
import { Transcription } from './responses';
/**
 * User input that is sent to the model.
 *
 * @internal
 */
export interface _LiveClientContent {
  clientContent: {
    /** A single-element tuple: the turn being sent. */
    turns: [Content];
    /** Whether this message completes the current user turn. */
    turnComplete: boolean;
    inputTranscription?: Transcription;
    outputTranscription?: Transcription;
  };
}
/**
 * User input that is sent to the model in real time.
 *
 * @internal
 */
export interface _LiveClientRealtimeInput {
  realtimeInput: {
    text?: string;
    audio?: GenerativeContentBlob;
    video?: GenerativeContentBlob;
    /**
     * @deprecated Use `text`, `audio`, and `video` instead.
     */
    mediaChunks?: GenerativeContentBlob[];
  };
}
/**
 * Function responses that are sent to the model in real time.
 *
 * @internal
 */
export interface _LiveClientToolResponse {
  toolResponse: {
    /** One response per outstanding function call from the model. */
    functionResponses: FunctionResponse[];
  };
}
/**
 * The first message in a Live session, used to configure generation options.
 *
 * @internal
 */
export interface _LiveClientSetup {
  setup: {
    model: string;
    generationConfig?: _LiveGenerationConfig;
    tools?: Tool[];
    toolConfig?: ToolConfig;
    systemInstruction?: string | Part | Content;
    inputAudioTranscription?: AudioTranscriptionConfig;
    outputAudioTranscription?: AudioTranscriptionConfig;
  };
}
/**
 * The Live Generation Config.
 *
 * The public API ({@link LiveGenerationConfig}) has `inputAudioTranscription` and `outputAudioTranscription`,
 * but the server expects these fields to be in the top-level `setup` message. This was a conscious API decision.
 *
 * @internal
 */
export type _LiveGenerationConfig = Omit<LiveGenerationConfig, 'inputAudioTranscription' | 'outputAudioTranscription'>;

View File

@@ -0,0 +1,543 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ObjectSchema, TypedSchema } from '../requests/schema-builder';
import { Content, Part } from './content';
import { LanguageModelCreateOptions, LanguageModelPromptOptions } from './language-model';
import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, InferenceMode, ResponseModality, ThinkingLevel } from './enums';
import { ObjectSchemaRequest, SchemaRequest } from './schema';
/**
 * Base parameters for a number of methods.
 * @public
 */
export interface BaseParams {
  /** Per-request safety filtering settings. */
  safetySettings?: SafetySetting[];
  /** Configuration options that control how the model generates a response. */
  generationConfig?: GenerationConfig;
}
/**
 * Params passed to {@link getGenerativeModel}.
 * @public
 */
export interface ModelParams extends BaseParams {
  /** The name of the model to use. */
  model: string;
  /** Tools the model may call while generating. */
  tools?: Tool[];
  /** Configuration shared across all provided tools. */
  toolConfig?: ToolConfig;
  /** Instructions that direct the model to behave a certain way. */
  systemInstruction?: string | Part | Content;
}
/**
 * Params passed to {@link getLiveGenerativeModel}.
 * @beta
 */
export interface LiveModelParams {
  /** The name of the model to use. */
  model: string;
  /** Configuration options for live content generation. */
  generationConfig?: LiveGenerationConfig;
  /** Tools the model may call while generating. */
  tools?: Tool[];
  /** Configuration shared across all provided tools. */
  toolConfig?: ToolConfig;
  /** Instructions that direct the model to behave a certain way. */
  systemInstruction?: string | Part | Content;
}
/**
 * Request sent through {@link GenerativeModel.generateContent}
 * @public
 */
export interface GenerateContentRequest extends BaseParams {
  /** The conversation history plus the current prompt. */
  contents: Content[];
  /** Tools the model may call while generating. */
  tools?: Tool[];
  /** Configuration shared across all provided tools. */
  toolConfig?: ToolConfig;
  /** Instructions that direct the model to behave a certain way. */
  systemInstruction?: string | Part | Content;
}
/**
 * Safety setting that can be sent as part of request parameters.
 * @public
 */
export interface SafetySetting {
  /** The harm category this setting applies to. */
  category: HarmCategory;
  /** The blocking threshold for this category. */
  threshold: HarmBlockThreshold;
  /**
   * The harm block method.
   *
   * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
   * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be
   * thrown if this property is defined.
   */
  method?: HarmBlockMethod;
}
/**
 * Config options for content-related requests
 * @public
 */
export interface GenerationConfig {
  /** Number of response candidates to generate. */
  candidateCount?: number;
  /** Sequences at which the model will stop generating. */
  stopSequences?: string[];
  /** Maximum number of tokens that can be generated in the response. */
  maxOutputTokens?: number;
  /** Controls the degree of randomness in token selection. */
  temperature?: number;
  /** Nucleus-sampling probability-mass cutoff for token selection. */
  topP?: number;
  /** Number of highest-probability tokens considered at each step. */
  topK?: number;
  /** Penalizes tokens that have already appeared in the output. */
  presencePenalty?: number;
  /** Penalizes tokens proportionally to how often they appear in the output. */
  frequencyPenalty?: number;
  /**
   * Output response MIME type of the generated candidate text.
   * Supported MIME types are `text/plain` (default, text output),
   * `application/json` (JSON response in the candidates), and
   * `text/x.enum`.
   */
  responseMimeType?: string;
  /**
   * Output response schema of the generated candidate text. This
   * value can be a class generated with a {@link Schema} static method
   * like `Schema.string()` or `Schema.object()` or it can be a plain
   * JS object matching the {@link SchemaRequest} interface.
   * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently
   * this is limited to `application/json` and `text/x.enum`.
   */
  responseSchema?: TypedSchema | SchemaRequest;
  /**
   * Generation modalities to be returned in generation responses.
   *
   * @remarks
   * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}.
   * - Only image generation (`ResponseModality.IMAGE`) is supported.
   *
   * @beta
   */
  responseModalities?: ResponseModality[];
  /**
   * Configuration for "thinking" behavior of compatible Gemini models.
   */
  thinkingConfig?: ThinkingConfig;
}
/**
 * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
 *
 * @beta
 */
export interface LiveGenerationConfig {
  /**
   * Configuration for speech synthesis.
   */
  speechConfig?: SpeechConfig;
  /**
   * Specifies the maximum number of tokens that can be generated in the response. The number of
   * tokens per word varies depending on the language outputted. Is unbounded by default.
   */
  maxOutputTokens?: number;
  /**
   * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
   * probability tokens are always selected. In this case, responses for a given prompt are mostly
   * deterministic, but a small amount of variation is still possible.
   */
  temperature?: number;
  /**
   * Changes how the model selects tokens for output. Tokens are
   * selected from the most to least probable until the sum of their probabilities equals the `topP`
   * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
   * and the `topP` value is 0.5, then the model will select either A or B as the next token by using
   * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset.
   */
  topP?: number;
  /**
   * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is
   * the most probable among all tokens in the model's vocabulary, while a `topK` value of 3 means that
   * the next token is selected from among the 3 most probable using their probabilities. Tokens
   * are then further filtered with the highest selected `temperature` sampling. Defaults to 40
   * if unspecified.
   */
  topK?: number;
  /**
   * Positive penalties.
   */
  presencePenalty?: number;
  /**
   * Frequency penalties.
   */
  frequencyPenalty?: number;
  /**
   * The modalities of the response.
   */
  responseModalities?: ResponseModality[];
  /**
   * Enables transcription of audio input.
   *
   * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property
   * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
   * messages, so you may only receive small amounts of text per message. For example, if you ask the model
   * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
   */
  inputAudioTranscription?: AudioTranscriptionConfig;
  /**
   * Enables transcription of audio output.
   *
   * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
   * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
   * messages, so you may only receive small amounts of text per message. For example, if the model says
   * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
   */
  outputAudioTranscription?: AudioTranscriptionConfig;
}
/**
 * Params for {@link GenerativeModel.startChat}.
 * @public
 */
export interface StartChatParams extends BaseParams {
  /** Previous turns used to seed the chat session. */
  history?: Content[];
  /** Tools the model may call while generating. */
  tools?: Tool[];
  /** Configuration shared across all provided tools. */
  toolConfig?: ToolConfig;
  /** Instructions that direct the model to behave a certain way. */
  systemInstruction?: string | Part | Content;
}
/**
 * Params for calling {@link GenerativeModel.countTokens}
 * @public
 */
export interface CountTokensRequest {
  /** The content whose tokens should be counted. */
  contents: Content[];
  /**
   * Instructions that direct the model to behave a certain way.
   */
  systemInstruction?: string | Part | Content;
  /**
   * {@link Tool} configuration.
   */
  tools?: Tool[];
  /**
   * Configuration options that control how the model generates a response.
   */
  generationConfig?: GenerationConfig;
}
/**
 * Params passed to {@link getGenerativeModel}.
 * @public
 */
export interface RequestOptions {
  /**
   * Request timeout in milliseconds. Defaults to 180 seconds (180000ms).
   */
  timeout?: number;
  /**
   * Base url for endpoint. Defaults to
   * https://firebasevertexai.googleapis.com, which is the
   * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API}
   * (used regardless of your chosen Gemini API provider).
   */
  baseUrl?: string;
  /**
   * Limits amount of sequential function calls the SDK can make during automatic
   * function calling, in order to prevent infinite loops. If not specified,
   * this value defaults to 10.
   *
   * When it reaches this limit, it will return the last response received
   * from the model, whether it is a text response or further function calls.
   *
   * NOTE(review): the property name misspells "Sequential". It cannot simply be
   * renamed without breaking the public API — confirm whether a correctly
   * spelled alias should be added and this one deprecated.
   */
  maxSequentalFunctionCalls?: number;
}
/**
 * Options that can be provided per-request.
 * Extends the base {@link RequestOptions} (like `timeout` and `baseUrl`)
 * with request-specific controls like cancellation via `AbortSignal`.
 *
 * Options specified here will override any default {@link RequestOptions}
 * configured on a model (for example, {@link GenerativeModel}).
 *
 * @public
 */
export interface SingleRequestOptions extends RequestOptions {
  /**
   * An `AbortSignal` instance that allows cancelling ongoing requests (like `generateContent` or
   * `generateImages`).
   *
   * If provided, calling `abort()` on the corresponding `AbortController`
   * will attempt to cancel the underlying HTTP request. An `AbortError` will be thrown
   * if cancellation is successful.
   *
   * Note that this will not cancel the request in the backend, so any applicable billing charges
   * will still be applied despite cancellation.
   *
   * @example
   * ```javascript
   * const controller = new AbortController();
   * const model = getGenerativeModel({
   *   // ...
   * });
   * model.generateContent(
   *   "Write a story about a magic backpack.",
   *   { signal: controller.signal }
   * );
   *
   * // To cancel request:
   * controller.abort();
   * ```
   * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
   */
  signal?: AbortSignal;
}
/**
 * Defines a tool that the model can call to access external knowledge.
 * @public
 */
export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool;
/**
 * Structured representation of a function declaration as defined by the
 * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
 * Included
 * in this declaration are the function name and parameters. This
 * `FunctionDeclaration` is a representation of a block of code that can be used
 * as a Tool by the model and executed by the client.
 * @public
 */
export interface FunctionDeclaration {
  /**
   * The name of the function to call. Must start with a letter or an
   * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
   * a max length of 64.
   */
  name: string;
  /**
   * Description and purpose of the function. Model uses it to decide
   * how and whether to call the function.
   */
  description: string;
  /**
   * Optional. Describes the parameters to this function in JSON Schema Object
   * format. Reflects the OpenAPI 3.0.3 Parameter Object. Parameter names are
   * case-sensitive. For a function with no parameters, this can be left unset.
   */
  parameters?: ObjectSchema | ObjectSchemaRequest;
  /**
   * Reference to an actual function to call. Specifying this will cause the
   * function to be called automatically when requested by the model.
   */
  functionReference?: Function;
}
/**
 * A tool that allows a Gemini model to connect to Google Search to access and incorporate
 * up-to-date information from the web into its responses.
 *
 * Important: If using Grounding with Google Search, you are required to comply with the
 * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
 * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
 * section within the Service Specific Terms).
 *
 * @public
 */
export interface GoogleSearchTool {
  /**
   * Specifies the Google Search configuration.
   * Currently, this is an empty object, but it's reserved for future configuration options.
   *
   * When using this feature, you are required to comply with the "Grounding with Google Search"
   * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
   * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
   * section within the Service Specific Terms).
   */
  googleSearch: GoogleSearch;
}
/**
 * A tool that enables the model to use code execution.
 *
 * @beta
 */
export interface CodeExecutionTool {
  /**
   * Specifies the code execution configuration.
   * Currently, this is an empty object, but it's reserved for future configuration options.
   */
  codeExecution: {};
}
/**
 * Specifies the Google Search configuration.
 *
 * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
 *
 * @public
 */
export interface GoogleSearch {
}
/**
 * A tool that allows you to provide additional context to the models in the form of public web
 * URLs. By including URLs in your request, the Gemini model will access the content from those
 * pages to inform and enhance its response.
 *
 * @beta
 */
export interface URLContextTool {
  /**
   * Specifies the URL Context configuration.
   */
  urlContext: URLContext;
}
/**
 * Specifies the URL Context configuration.
 *
 * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
 *
 * @beta
 */
export interface URLContext {
}
/**
 * A `FunctionDeclarationsTool` is a piece of code that enables the system to
 * interact with external systems to perform an action, or set of actions,
 * outside of knowledge and scope of the model.
 * @public
 */
export interface FunctionDeclarationsTool {
  /**
   * Optional. One or more function declarations
   * to be passed to the model along with the current user query. Model may
   * decide to call a subset of these functions by populating
   * {@link FunctionCall} in the response. User should
   * provide a {@link FunctionResponse} for each
   * function call in the next turn. Based on the function responses, the model will
   * generate the final response back to the user. Maximum 64 function
   * declarations can be provided.
   */
  functionDeclarations?: FunctionDeclaration[];
}
/**
 * Tool config. This config is shared for all tools provided in the request.
 * @public
 */
export interface ToolConfig {
  /** Controls how (and whether) the model may call the provided functions. */
  functionCallingConfig?: FunctionCallingConfig;
}
/**
 * Configuration for function calling.
 * @public
 */
export interface FunctionCallingConfig {
  /** The function calling mode. */
  mode?: FunctionCallingMode;
  /** If set, restricts the model to calling only the named functions. */
  allowedFunctionNames?: string[];
}
/**
 * Encapsulates configuration for on-device inference.
 *
 * @beta
 */
export interface OnDeviceParams {
  /** Options used when creating the on-device language model session. */
  createOptions?: LanguageModelCreateOptions;
  /** Options applied to each on-device prompt. */
  promptOptions?: LanguageModelPromptOptions;
}
/**
 * Configures hybrid inference.
 * @beta
 */
export interface HybridParams {
  /**
   * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
   */
  mode: InferenceMode;
  /**
   * Optional. Specifies advanced params for on-device inference.
   */
  onDeviceParams?: OnDeviceParams;
  /**
   * Optional. Specifies advanced params for in-cloud inference.
   */
  inCloudParams?: ModelParams;
}
/**
 * Configuration for "thinking" behavior of compatible Gemini models.
 *
 * Certain models utilize a thinking process before generating a response. This allows them to
 * reason through complex problems and plan a more coherent and accurate answer.
 *
 * @public
 */
export interface ThinkingConfig {
  /**
   * The thinking budget, in tokens.
   *
   * @remarks
   * This parameter sets an upper limit on the number of tokens the model can use for its internal
   * "thinking" process. A higher budget may result in higher quality responses for complex tasks
   * but can also increase latency and cost.
   *
   * The range of supported thinking budget values depends on the model.
   *
   * <ul>
   * <li>To use the default thinking budget for a model, leave
   * this value undefined.</li>
   *
   * <li>To disable thinking, when supported by the model, set this value
   * to `0`.</li>
   *
   * <li>To use dynamic thinking, which allows the model to decide on the thinking
   * budget based on the task, set this value to `-1`.</li>
   * </ul>
   *
   * An error will be thrown if you set a thinking budget for a model that does not support this
   * feature or if the specified budget is not within the model's supported range.
   *
   * The model will also error if `thinkingLevel` and `thinkingBudget` are
   * both set.
   */
  thinkingBudget?: number;
  /**
   * The thinking level for the model to use.
   * If not specified, Gemini will use the model's default dynamic thinking level.
   *
   * @remarks
   * Note: The model will error if `thinkingLevel` and `thinkingBudget` are
   * both set.
   *
   * Important: Gemini 2.5 series models do not support thinking levels; use
   * `thinkingBudget` to set a thinking budget instead.
   */
  thinkingLevel?: ThinkingLevel;
  /**
   * Whether to include "thought summaries" in the model's response.
   *
   * @remarks
   * Thought summaries provide a brief overview of the model's internal thinking process,
   * offering insight into how it arrived at the final answer. This can be useful for
   * debugging, understanding the model's reasoning, and verifying its accuracy.
   */
  includeThoughts?: boolean;
}
/**
 * Configuration for a pre-built voice.
 *
 * @beta
 */
export interface PrebuiltVoiceConfig {
  /**
   * The voice name to use for speech synthesis.
   *
   * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}.
   */
  voiceName?: string;
}
/**
 * Configuration for the voice to be used in speech synthesis.
 *
 * @beta
 */
export interface VoiceConfig {
  /**
   * Configures the voice using a pre-built voice configuration.
   */
  prebuiltVoiceConfig?: PrebuiltVoiceConfig;
}
/**
 * Configures speech synthesis.
 *
 * @beta
 */
export interface SpeechConfig {
  /**
   * Configures the voice to be used in speech synthesis.
   */
  voiceConfig?: VoiceConfig;
}
/**
 * The audio transcription configuration.
 *
 * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
 */
export interface AudioTranscriptionConfig {
}

View File

@@ -0,0 +1,607 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content, FunctionCall, InlineDataPart } from './content';
import { BlockReason, FinishReason, HarmCategory, HarmProbability, HarmSeverity, InferenceSource, Modality } from './enums';
/**
 * Result object returned from {@link GenerativeModel.generateContent} call.
 *
 * @public
 */
export interface GenerateContentResult {
  /** The model's response, wrapped with helper methods. */
  response: EnhancedGenerateContentResponse;
}
/**
 * Result object returned from {@link GenerativeModel.generateContentStream} call.
 * Iterate over `stream` to get chunks as they come in and/or
 * use the `response` promise to get the aggregated response when
 * the stream is done.
 *
 * @public
 */
export interface GenerateContentStreamResult {
  /** Yields partial responses as chunks arrive from the model. */
  stream: AsyncGenerator<EnhancedGenerateContentResponse>;
  /** Resolves with the aggregated response once the stream completes. */
  response: Promise<EnhancedGenerateContentResponse>;
}
/**
 * Response object wrapped with helper methods.
 *
 * @public
 */
export interface EnhancedGenerateContentResponse extends GenerateContentResponse {
  /**
   * Returns the text string from the response, if available.
   * Throws if the prompt or candidate was blocked.
   */
  text: () => string;
  /**
   * Aggregates and returns every {@link InlineDataPart} from the first candidate of
   * {@link GenerateContentResponse}.
   *
   * @throws If the prompt or candidate was blocked.
   */
  inlineDataParts: () => InlineDataPart[] | undefined;
  /**
   * Aggregates and returns every {@link FunctionCall} from the first candidate of
   * {@link GenerateContentResponse}.
   *
   * @throws If the prompt or candidate was blocked.
   */
  functionCalls: () => FunctionCall[] | undefined;
  /**
   * Aggregates and returns every {@link TextPart} with their `thought` property set
   * to `true` from the first candidate of {@link GenerateContentResponse}.
   *
   * @throws If the prompt or candidate was blocked.
   *
   * @remarks
   * Thought summaries provide a brief overview of the model's internal thinking process,
   * offering insight into how it arrived at the final answer. This can be useful for
   * debugging, understanding the model's reasoning, and verifying its accuracy.
   *
   * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is
   * set to `true`.
   */
  thoughtSummary: () => string | undefined;
  /**
   * Indicates whether inference happened on-device or in-cloud.
   *
   * @beta
   */
  inferenceSource?: InferenceSource;
}
/**
 * Individual response from {@link GenerativeModel.generateContent} and
 * {@link GenerativeModel.generateContentStream}.
 * `generateContentStream()` will return one in each chunk until
 * the stream is done.
 * @public
 */
export interface GenerateContentResponse {
  /** Generated response candidates. */
  candidates?: GenerateContentCandidate[];
  /** Feedback about the prompt, populated when the prompt was blocked. */
  promptFeedback?: PromptFeedback;
  /** Token accounting for this request and response. */
  usageMetadata?: UsageMetadata;
}
/**
 * Usage metadata about a {@link GenerateContentResponse}.
 *
 * @public
 */
export interface UsageMetadata {
  /** The number of tokens in the request prompt. */
  promptTokenCount: number;
  /** The number of tokens in the generated candidates. */
  candidatesTokenCount: number;
  /**
   * The number of tokens used by the model's internal "thinking" process.
   */
  thoughtsTokenCount?: number;
  /** The total number of tokens counted for the request and response. */
  totalTokenCount: number;
  /**
   * The number of tokens used by tools.
   */
  toolUsePromptTokenCount?: number;
  /** Breakdown of prompt tokens by modality. */
  promptTokensDetails?: ModalityTokenCount[];
  /** Breakdown of candidate tokens by modality. */
  candidatesTokensDetails?: ModalityTokenCount[];
  /**
   * A list of tokens used by tools, broken down by modality.
   */
  toolUsePromptTokensDetails?: ModalityTokenCount[];
  /**
   * The number of tokens in the prompt that were served from the cache.
   * If implicit caching is not active or no content was cached,
   * this will be 0.
   */
  cachedContentTokenCount?: number;
  /**
   * Detailed breakdown of the cached tokens by modality (for example, text or
   * image). This list provides granular insight into which parts of
   * the content were cached.
   */
  cacheTokensDetails?: ModalityTokenCount[];
}
/**
 * Represents token counting info for a single modality.
 *
 * @public
 */
export interface ModalityTokenCount {
  /** The modality associated with this token count. */
  modality: Modality;
  /** The number of tokens counted. */
  tokenCount: number;
}
/**
 * If the prompt was blocked, this will be populated with `blockReason` and
 * the relevant `safetyRatings`.
 * @public
 */
export interface PromptFeedback {
  /** The reason the prompt was blocked, if it was. */
  blockReason?: BlockReason;
  /** Safety ratings for the prompt. */
  safetyRatings: SafetyRating[];
  /**
   * A human-readable description of the `blockReason`.
   *
   * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
   */
  blockReasonMessage?: string;
}
/**
 * A candidate returned as part of a {@link GenerateContentResponse}.
 * @public
 */
export interface GenerateContentCandidate {
  /** The index of this candidate in the `candidates` array. */
  index: number;
  /** The generated content. */
  content: Content;
  /** The reason generation stopped, if it did. */
  finishReason?: FinishReason;
  /** A human-readable message accompanying `finishReason`, if any. */
  finishMessage?: string;
  /** Safety ratings for this candidate. */
  safetyRatings?: SafetyRating[];
  /** Citation information for content included in this candidate. */
  citationMetadata?: CitationMetadata;
  /** Grounding metadata, populated when grounding is enabled. */
  groundingMetadata?: GroundingMetadata;
  /** Metadata about URLs retrieved via the {@link URLContextTool}. */
  urlContextMetadata?: URLContextMetadata;
}
/**
 * Citation metadata that may be found on a {@link GenerateContentCandidate}.
 * @public
 */
export interface CitationMetadata {
  /** The list of citations. */
  citations: Citation[];
}
/**
 * A single citation.
 * @public
 */
export interface Citation {
  /** Start index of the cited span within the response content. */
  startIndex?: number;
  /** End index of the cited span within the response content. */
  endIndex?: number;
  /** The URI of the cited source, if available. */
  uri?: string;
  /** The license of the cited source, if available. */
  license?: string;
  /**
   * The title of the cited source, if available.
   *
   * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
   */
  title?: string;
  /**
   * The publication date of the cited source, if available.
   *
   * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
   *
   * NOTE(review): `Date` here appears to refer to the protobuf `google.type.Date`
   * interface declared later in this file, not the JavaScript `Date` — confirm.
   */
  publicationDate?: Date;
}
/**
 * Metadata returned when grounding is enabled.
 *
 * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).
 *
 * Important: If using Grounding with Google Search, you are required to comply with the
 * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
 * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
 * section within the Service Specific Terms).
 *
 * @public
 */
export interface GroundingMetadata {
  /**
   * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
   * embedded in an app to display a Google Search entry point for follow-up web searches related to
   * a model's "Grounded Response".
   */
  searchEntryPoint?: SearchEntrypoint;
  /**
   * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content
   * (for example, from a web page) that the model used to ground its response.
   */
  groundingChunks?: GroundingChunk[];
  /**
   * A list of {@link GroundingSupport} objects. Each object details how specific segments of the
   * model's response are supported by the `groundingChunks`.
   */
  groundingSupports?: GroundingSupport[];
  /**
   * A list of web search queries that the model performed to gather the grounding information.
   * These can be used to allow users to explore the search results themselves.
   */
  webSearchQueries?: string[];
  /**
   * @deprecated Use {@link GroundingSupport} instead.
   */
  retrievalQueries?: string[];
}
/**
 * Google search entry point.
 *
 * NOTE(review): the type name ("SearchEntrypoint") and the `searchEntryPoint`
 * property that uses it differ in casing; renaming would break the public API.
 *
 * @public
 */
export interface SearchEntrypoint {
  /**
   * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid
   * undesired interaction with the rest of the page's CSS.
   *
   * To ensure proper rendering and prevent CSS conflicts, it is recommended
   * to encapsulate this `renderedContent` within a shadow DOM when embedding it
   * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.
   *
   * @example
   * ```javascript
   * const container = document.createElement('div');
   * document.body.appendChild(container);
   * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;
   * ```
   */
  renderedContent?: string;
}
/**
 * Represents a chunk of retrieved data that supports a claim in the model's response. This is part
 * of the grounding information provided when grounding is enabled.
 *
 * @public
 */
export interface GroundingChunk {
  /**
   * Contains details if the grounding chunk is from a web source.
   */
  web?: WebGroundingChunk;
}
/**
 * A grounding chunk from the web.
 *
 * Important: If using Grounding with Google Search, you are required to comply with the
 * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search".
 *
 * @public
 */
export interface WebGroundingChunk {
  /**
   * The URI of the retrieved web page.
   */
  uri?: string;
  /**
   * The title of the retrieved web page.
   */
  title?: string;
  /**
   * The domain of the original URI from which the content was retrieved.
   *
   * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
   * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
   * `undefined`.
   */
  domain?: string;
}
/**
 * Provides information about how a specific segment of the model's response is supported by the
 * retrieved grounding chunks.
 *
 * @public
 */
export interface GroundingSupport {
  /**
   * Specifies the segment of the model's response content that this grounding support pertains to.
   */
  segment?: Segment;
  /**
   * A list of indices that refer to specific {@link GroundingChunk} objects within the
   * {@link GroundingMetadata.groundingChunks} array. These referenced chunks
   * are the sources that support the claim made in the associated `segment` of the response.
   * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,
   * and `groundingChunks[4]` are the retrieved content supporting this part of the response.
   */
  groundingChunkIndices?: number[];
}
/**
 * Represents a specific segment within a {@link Content} object, often used to
 * pinpoint the exact location of text or data that grounding information refers to.
 *
 * @public
 */
export interface Segment {
  /**
   * The zero-based index of the {@link Part} object within the `parts` array
   * of its parent {@link Content} object. This identifies which part of the
   * content the segment belongs to.
   */
  partIndex: number;
  /**
   * The zero-based start index of the segment within the specified `Part`,
   * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the
   * beginning of the part's content (e.g., `Part.text`).
   */
  startIndex: number;
  /**
   * The zero-based end index of the segment within the specified `Part`,
   * measured in UTF-8 bytes. This offset is exclusive, meaning the character
   * at this index is not included in the segment.
   */
  endIndex: number;
  /**
   * The text corresponding to the segment from the response.
   */
  text: string;
}
/**
 * Metadata related to {@link URLContextTool}.
 *
 * @public
 */
export interface URLContextMetadata {
  /**
   * List of URL metadata used to provide context to the Gemini model.
   */
  urlMetadata: URLMetadata[];
}
/**
 * Metadata for a single URL retrieved by the {@link URLContextTool} tool.
 *
 * @public
 */
export interface URLMetadata {
  /**
   * The retrieved URL.
   */
  retrievedUrl?: string;
  /**
   * The status of the URL retrieval. See {@link URLRetrievalStatus}.
   */
  urlRetrievalStatus?: URLRetrievalStatus;
}
/**
* The status of a URL retrieval.
*
* @remarks
* <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
* <br/>
* <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
* <br/>
* <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
* <br/>
* <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
* <br/>
* <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
* <br/>
*
* @public
*/
export declare const URLRetrievalStatus: {
/**
* Unspecified retrieval status.
*/
URL_RETRIEVAL_STATUS_UNSPECIFIED: string;
/**
* The URL retrieval was successful.
*/
URL_RETRIEVAL_STATUS_SUCCESS: string;
/**
* The URL retrieval failed.
*/
URL_RETRIEVAL_STATUS_ERROR: string;
/**
* The URL retrieval failed because the content is behind a paywall.
*/
URL_RETRIEVAL_STATUS_PAYWALL: string;
/**
* The URL retrieval failed because the content is unsafe.
*/
URL_RETRIEVAL_STATUS_UNSAFE: string;
};
/**
* The status of a URL retrieval.
*
* @remarks
* <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
* <br/>
* <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
* <br/>
* <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
* <br/>
* <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
* <br/>
* <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
* <br/>
*
* @public
*/
export type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];
/**
 * An attribution to a web source.
 * @public
 */
export interface WebAttribution {
    /** The URI of the attributed web source. */
    uri: string;
    /** The title of the attributed web source. */
    title: string;
}
/**
 * An attribution to retrieved context.
 * @public
 */
export interface RetrievedContextAttribution {
    /** The URI of the attributed retrieved context. */
    uri: string;
    /** The title of the attributed retrieved context. */
    title: string;
}
/**
 * Protobuf google.type.Date
 * @public
 */
export interface Date {
    /** Year of the date (for example, 2024). */
    year: number;
    /** Month of the year (1-12), per `google.type.Date`. */
    month: number;
    /** Day of the month (1-31), per `google.type.Date`. */
    day: number;
}
/**
 * A safety rating associated with a {@link GenerateContentCandidate}
 * @public
 */
export interface SafetyRating {
    /** The harm category this rating applies to. */
    category: HarmCategory;
    /** The probability level that the content falls into this harm category. */
    probability: HarmProbability;
    /**
     * The harm severity level.
     *
     * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.
     */
    severity: HarmSeverity;
    /**
     * The probability score of the harm category.
     *
     * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
     */
    probabilityScore: number;
    /**
     * The severity score of the harm category.
     *
     * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
     */
    severityScore: number;
    /** Whether the content was blocked due to this safety rating. */
    blocked: boolean;
}
/**
* Response from calling {@link GenerativeModel.countTokens}.
* @public
*/
export interface CountTokensResponse {
/**
* The total number of tokens counted across all instances from the request.
*/
totalTokens: number;
/**
* @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.
*
* The total number of billable characters counted across all instances
* from the request.
*/
totalBillableCharacters?: number;
/**
* The breakdown, by modality, of how many tokens are consumed by the prompt.
*/
promptTokensDetails?: ModalityTokenCount[];
}
/**
 * An incremental content update from the model.
 *
 * @beta
 */
export interface LiveServerContent {
    /** Discriminates this message as a server content update. */
    type: 'serverContent';
    /**
     * The content that the model has generated as part of the current conversation with the user.
     */
    modelTurn?: Content;
    /**
     * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
     */
    turnComplete?: boolean;
    /**
     * Indicates whether the model was interrupted by the client. An interruption occurs when
     * the client sends a message before the model finishes its turn. This is `undefined` if the
     * model was not interrupted.
     */
    interrupted?: boolean;
    /**
     * Transcription of the audio that was input to the model.
     */
    inputTranscription?: Transcription;
    /**
     * Transcription of the audio output from the model.
     */
    outputTranscription?: Transcription;
}
/**
* Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription
* is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on
* the {@link LiveGenerationConfig}.
*
* @beta
*/
export interface Transcription {
/**
* The text transcription of the audio.
*/
text?: string;
}
/**
* A request from the model for the client to execute one or more functions.
*
* @beta
*/
export interface LiveServerToolCall {
type: 'toolCall';
/**
* An array of function calls to run.
*/
functionCalls: FunctionCall[];
}
/**
* Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.
*
* @beta
*/
export interface LiveServerToolCallCancellation {
type: 'toolCallCancellation';
/**
* IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.
*/
functionIds: string[];
}
/**
* Notification that the server will not be able to service the client soon.
*
* @beta
*/
export interface LiveServerGoingAwayNotice {
type: 'goingAwayNotice';
/**
* The remaining time (in seconds) before the connection will be terminated.
*/
timeLeft: number;
}
/**
 * The types of responses that can be returned by {@link LiveSession.receive}.
 *
 * @beta
 */
export declare const LiveResponseType: {
    /** Identifies a {@link LiveServerContent} message. */
    SERVER_CONTENT: string;
    /** Identifies a {@link LiveServerToolCall} message. */
    TOOL_CALL: string;
    /** Identifies a {@link LiveServerToolCallCancellation} message. */
    TOOL_CALL_CANCELLATION: string;
    /** Identifies a {@link LiveServerGoingAwayNotice} message. */
    GOING_AWAY_NOTICE: string;
};
/**
* The types of responses that can be returned by {@link LiveSession.receive}.
* This is a property on all messages that can be used for type narrowing. This property is not
* returned by the server, it is assigned to a server message object once it's parsed.
*
* @beta
*/
export type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];

View File

@@ -0,0 +1,139 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Contains the list of OpenAPI data types
* as defined by the
* {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
* @public
*/
export declare const SchemaType: {
/** String type. */
readonly STRING: "string";
/** Number type. */
readonly NUMBER: "number";
/** Integer type. */
readonly INTEGER: "integer";
/** Boolean type. */
readonly BOOLEAN: "boolean";
/** Array type. */
readonly ARRAY: "array";
/** Object type. */
readonly OBJECT: "object";
};
/**
* Contains the list of OpenAPI data types
* as defined by the
* {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
* @public
*/
export type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];
/**
* Basic {@link Schema} properties shared across several Schema-related
* types.
* @public
*/
export interface SchemaShared<T> {
/**
* An array of {@link Schema}. The generated data must be valid against any of the schemas
* listed in this array. This allows specifying multiple possible structures or types for a
* single field.
*/
anyOf?: T[];
/** Optional. The format of the property.
* When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or
* `'date-time'`, otherwise requests will fail.
*/
format?: string;
/** Optional. The description of the property. */
description?: string;
/**
* The title of the property. This helps document the schema's purpose but does not typically
* constrain the generated value. It can subtly guide the model by clarifying the intent of a
* field.
*/
title?: string;
/** Optional. The items of the property. */
items?: T;
/** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
minItems?: number;
/** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
maxItems?: number;
/** Optional. Map of `Schema` objects. */
properties?: {
[k: string]: T;
};
/** A hint suggesting the order in which the keys should appear in the generated JSON string. */
propertyOrdering?: string[];
/** Optional. The enum of the property. */
enum?: string[];
/** Optional. The example of the property. */
example?: unknown;
/** Optional. Whether the property is nullable. */
nullable?: boolean;
/** The minimum value of a numeric type. */
minimum?: number;
/** The maximum value of a numeric type. */
maximum?: number;
[key: string]: unknown;
}
/**
* Params passed to {@link Schema} static methods to create specific
* {@link Schema} classes.
* @public
*/
export interface SchemaParams extends SchemaShared<SchemaInterface> {
}
/**
 * Final format for {@link Schema} params passed to backend requests.
 * @public
 */
export interface SchemaRequest extends SchemaShared<SchemaRequest> {
    /**
     * The type of the property. This can only be undefined when using `anyOf` schemas,
     * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.
     */
    type?: SchemaType;
    /** Optional. Array of required property names. */
    required?: string[];
}
/**
 * Interface for {@link Schema} class.
 * @public
 */
export interface SchemaInterface extends SchemaShared<SchemaInterface> {
    /**
     * The type of the property. This can only be undefined when using `anyOf` schemas,
     * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.
     */
    type?: SchemaType;
}
/**
* Interface for JSON parameters in a schema of {@link (SchemaType:type)}
* "object" when not using the `Schema.object()` helper.
* @public
*/
export interface ObjectSchemaRequest extends SchemaRequest {
type: 'object';
/**
* This is not a property accepted in the final request to the backend, but is
* a client-side convenience property that is only usable by constructing
* a schema through the `Schema.object()` helper method. Populating this
* property will cause response errors if the object is not wrapped with
* `Schema.object()`.
*/
optionalProperties?: never;
}

67
node_modules/@firebase/ai/dist/esm/src/websocket.d.ts generated vendored Normal file
View File

@@ -0,0 +1,67 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A standardized interface for interacting with a WebSocket connection.
* This abstraction allows the SDK to use the appropriate WebSocket implementation
* for the current JS environment (Browser vs. Node) without
* changing the core logic of the `LiveSession`.
* @internal
*/
export interface WebSocketHandler {
/**
* Establishes a connection to the given URL.
*
* @param url The WebSocket URL (e.g., wss://...).
* @returns A promise that resolves on successful connection or rejects on failure.
*/
connect(url: string): Promise<void>;
/**
* Sends data over the WebSocket.
*
* @param data The string or binary data to send.
*/
send(data: string | ArrayBuffer): void;
/**
* Returns an async generator that yields parsed JSON objects from the server.
* The yielded type is `unknown` because the handler cannot guarantee the shape of the data.
* The consumer is responsible for type validation.
* The generator terminates when the connection is closed.
*
* @returns A generator that allows consumers to pull messages using a `for await...of` loop.
*/
listen(): AsyncGenerator<unknown>;
/**
* Closes the WebSocket connection.
*
* @param code - A numeric status code explaining why the connection is closing.
* @param reason - A human-readable string explaining why the connection is closing.
*/
close(code?: number, reason?: string): Promise<void>;
}
/**
* A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.
*
* @internal
*/
export declare class WebSocketHandlerImpl implements WebSocketHandler {
private ws?;
constructor();
connect(url: string): Promise<void>;
send(data: string | ArrayBuffer): void;
listen(): AsyncGenerator<unknown>;
close(code?: number, reason?: string): Promise<void>;
}

4820
node_modules/@firebase/ai/dist/index.cjs.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

1
node_modules/@firebase/ai/dist/index.cjs.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

4512
node_modules/@firebase/ai/dist/index.node.cjs.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

1
node_modules/@firebase/ai/dist/index.node.cjs.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

4457
node_modules/@firebase/ai/dist/index.node.mjs generated vendored Normal file

File diff suppressed because it is too large Load Diff

1
node_modules/@firebase/ai/dist/index.node.mjs.map generated vendored Normal file

File diff suppressed because one or more lines are too long

121
node_modules/@firebase/ai/dist/src/api.d.ts generated vendored Normal file
View File

@@ -0,0 +1,121 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseApp } from '@firebase/app';
import { AI_TYPE } from './constants';
import { AIService } from './service';
import { AI, AIOptions } from './public-types';
import { ImagenModelParams, HybridParams, ModelParams, RequestOptions, LiveModelParams } from './types';
import { AIError } from './errors';
import { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel } from './models';
import { TemplateGenerativeModel } from './models/template-generative-model';
import { TemplateImagenModel } from './models/template-imagen-model';
export { ChatSession } from './methods/chat-session';
export { LiveSession } from './methods/live-session';
export * from './requests/schema-builder';
export { ImagenImageFormat } from './requests/imagen-image-format';
export { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, TemplateGenerativeModel, TemplateImagenModel, AIError };
export { Backend, VertexAIBackend, GoogleAIBackend } from './backend';
export { startAudioConversation, AudioConversationController, StartAudioConversationOptions } from './methods/live-session-helpers';
declare module '@firebase/component' {
interface NameServiceMapping {
[AI_TYPE]: AIService;
}
}
/**
* Returns the default {@link AI} instance that is associated with the provided
* {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
* default settings.
*
* @example
* ```javascript
* const ai = getAI(app);
* ```
*
* @example
* ```javascript
* // Get an AI instance configured to use the Gemini Developer API (via Google AI).
* const ai = getAI(app, { backend: new GoogleAIBackend() });
* ```
*
* @example
* ```javascript
* // Get an AI instance configured to use the Vertex AI Gemini API.
* const ai = getAI(app, { backend: new VertexAIBackend() });
* ```
*
* @param app - The {@link @firebase/app#FirebaseApp} to use.
* @param options - {@link AIOptions} that configure the AI instance.
* @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
*
* @public
*/
export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
/**
 * Returns a {@link GenerativeModel} class with methods for inference
 * and other functionality.
 *
 * @param ai - An {@link AI} instance.
 * @param modelParams - Parameters to configure the model: {@link ModelParams} for in-cloud
 * inference, or {@link HybridParams} for hybrid inference.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @public
 */
export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
/**
* Returns an {@link ImagenModel} class with methods for using Imagen.
*
* Only Imagen 3 models (named `imagen-3.0-*`) are supported.
*
* @param ai - An {@link AI} instance.
* @param modelParams - Parameters to use when making Imagen requests.
* @param requestOptions - Additional options to use when making requests.
*
* @throws If the `apiKey` or `projectId` fields are missing in your
* Firebase config.
*
* @public
*/
export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
/**
* Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
*
* The Live API is only supported in modern browser windows and Node >= 22.
*
* @param ai - An {@link AI} instance.
* @param modelParams - Parameters to use when setting up a {@link LiveSession}.
* @throws If the `apiKey` or `projectId` fields are missing in your
* Firebase config.
*
* @beta
*/
export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel;
/**
* Returns a {@link TemplateGenerativeModel} class for executing server-side
* templates.
*
* @param ai - An {@link AI} instance.
* @param requestOptions - Additional options to use when making requests.
*
* @beta
*/
export declare function getTemplateGenerativeModel(ai: AI, requestOptions?: RequestOptions): TemplateGenerativeModel;
/**
* Returns a {@link TemplateImagenModel} class for executing server-side
* Imagen templates.
*
* @param ai - An {@link AI} instance.
* @param requestOptions - Additional options to use when making requests.
*
* @beta
*/
export declare function getTemplateImagenModel(ai: AI, requestOptions?: RequestOptions): TemplateImagenModel;

98
node_modules/@firebase/ai/dist/src/backend.d.ts generated vendored Normal file
View File

@@ -0,0 +1,98 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { BackendType } from './public-types';
/**
* Abstract base class representing the configuration for an AI service backend.
* This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
* the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
* {@link VertexAIBackend} for the Vertex AI Gemini API.
*
* @public
*/
export declare abstract class Backend {
/**
* Specifies the backend type.
*/
readonly backendType: BackendType;
/**
* Protected constructor for use by subclasses.
* @param type - The backend type.
*/
protected constructor(type: BackendType);
/**
* @internal
*/
abstract _getModelPath(project: string, model: string): string;
/**
* @internal
*/
abstract _getTemplatePath(project: string, templateId: string): string;
}
/**
* Configuration class for the Gemini Developer API.
*
* Use this with {@link AIOptions} when initializing the AI service via
* {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
*
* @public
*/
export declare class GoogleAIBackend extends Backend {
/**
* Creates a configuration object for the Gemini Developer API backend.
*/
constructor();
/**
* @internal
*/
_getModelPath(project: string, model: string): string;
/**
* @internal
*/
_getTemplatePath(project: string, templateId: string): string;
}
/**
* Configuration class for the Vertex AI Gemini API.
*
* Use this with {@link AIOptions} when initializing the AI service via
* {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
*
* @public
*/
export declare class VertexAIBackend extends Backend {
/**
* The region identifier.
* See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
* for a list of supported locations.
*/
readonly location: string;
/**
* Creates a configuration object for the Vertex AI backend.
*
* @param location - The region identifier, defaulting to `us-central1`;
* see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
* for a list of supported locations.
*/
constructor(location?: string);
/**
* @internal
*/
_getModelPath(project: string, model: string): string;
/**
* @internal
*/
_getTemplatePath(project: string, templateId: string): string;
}

29
node_modules/@firebase/ai/dist/src/constants.d.ts generated vendored Normal file
View File

@@ -0,0 +1,29 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export declare const AI_TYPE = "AI";
export declare const DEFAULT_LOCATION = "us-central1";
export declare const DEFAULT_DOMAIN = "firebasevertexai.googleapis.com";
export declare const STAGING_URL = "https://staging-firebasevertexai.sandbox.googleapis.com";
export declare const DEFAULT_API_VERSION = "v1beta";
export declare const PACKAGE_VERSION: string;
export declare const LANGUAGE_TAG = "gl-js";
export declare const HYBRID_TAG = "hybrid";
export declare const DEFAULT_FETCH_TIMEOUT_MS: number;
/**
* Defines the name of the default in-cloud model to use for hybrid inference.
*/
export declare const DEFAULT_HYBRID_IN_CLOUD_MODEL = "gemini-2.5-flash-lite";

35
node_modules/@firebase/ai/dist/src/errors.d.ts generated vendored Normal file
View File

@@ -0,0 +1,35 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FirebaseError } from '@firebase/util';
import { AIErrorCode, CustomErrorData } from './types';
/**
 * Error class for the Firebase AI SDK.
 *
 * @public
 */
export declare class AIError extends FirebaseError {
    /** The error code from {@link (AIErrorCode:type)}. */
    readonly code: AIErrorCode;
    /** Optional data with additional details about the error. */
    readonly customErrorData?: CustomErrorData | undefined;
    /**
     * Constructs a new instance of the `AIError` class.
     *
     * @param code - The error code from {@link (AIErrorCode:type)}.
     * @param message - A human-readable message describing the error.
     * @param customErrorData - Optional error data.
     */
    constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined);
}

View File

@@ -0,0 +1,19 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ComponentContainer, InstanceFactoryOptions } from '@firebase/component';
import { AIService } from './service';
export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;

19
node_modules/@firebase/ai/dist/src/factory-node.d.ts generated vendored Normal file
View File

@@ -0,0 +1,19 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ComponentContainer, InstanceFactoryOptions } from '@firebase/component';
import { AIService } from './service';
export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;

View File

@@ -0,0 +1,73 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CountTokensRequest, GenerateContentCandidate, GenerateContentRequest, GenerateContentResponse, PromptFeedback } from './types';
import { GoogleAIGenerateContentResponse, GoogleAIGenerateContentCandidate, GoogleAICountTokensRequest } from './types/googleai';
/**
* This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).
* The public API prioritizes the format used by the Vertex AI Gemini API.
* We avoid having two sets of types by translating requests and responses between the two API formats.
* This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
* with minimal code changes.
*
* In here are functions that map requests and responses between the two API formats.
* Requests in the Vertex AI format are mapped to the Google AI format before being sent.
* Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
*/
/**
 * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
 *
 * @param generateContentRequest The {@link GenerateContentRequest} to map.
 * @returns A {@link GenerateContentRequest} that conforms to the Google AI format.
 *
 * @throws If the request contains properties that are unsupported by Google AI.
 *
 * @internal
 */
export declare function mapGenerateContentRequest(generateContentRequest: GenerateContentRequest): GenerateContentRequest;
/**
* Maps a {@link GenerateContentResponse} from Google AI to the format of the
* {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.
*
* @param googleAIResponse The {@link GenerateContentResponse} from Google AI.
* @returns A {@link GenerateContentResponse} that conforms to the public API's format.
*
* @internal
*/
export declare function mapGenerateContentResponse(googleAIResponse: GoogleAIGenerateContentResponse): GenerateContentResponse;
/**
* Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.
*
* @param countTokensRequest The {@link CountTokensRequest} to map.
* @param model The model to count tokens with.
* @returns A {@link CountTokensRequest} that conforms to the Google AI format.
*
* @internal
*/
export declare function mapCountTokensRequest(countTokensRequest: CountTokensRequest, model: string): GoogleAICountTokensRequest;
/**
 * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms
 * to the Vertex AI API format.
 *
 * @param candidates The array of {@link GoogleAIGenerateContentCandidate} objects to map.
 * @returns An array of {@link GenerateContentCandidate} objects that conform to the Vertex AI format.
 *
 * @throws If any {@link Part} in the candidates has a `videoMetadata` property.
 *
 * @internal
 */
export declare function mapGenerateContentCandidates(candidates: GoogleAIGenerateContentCandidate[]): GenerateContentCandidate[];
/**
 * Maps a {@link PromptFeedback} from the Google AI response format to the format
 * exposed in the public API.
 *
 * @param promptFeedback The {@link PromptFeedback} to map.
 * @returns The mapped {@link PromptFeedback}.
 *
 * @internal
 */
export declare function mapPromptFeedback(promptFeedback: PromptFeedback): PromptFeedback;

30
node_modules/@firebase/ai/dist/src/helpers.d.ts generated vendored Normal file
View File

@@ -0,0 +1,30 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Backend } from './backend';
/**
* Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}
* instances by backend type.
*
* @internal
*/
export declare function encodeInstanceIdentifier(backend: Backend): string;
/**
* Decodes an instance identifier string into a {@link Backend}.
*
* @internal
*/
export declare function decodeInstanceIdentifier(instanceIdentifier: string): Backend;

13
node_modules/@firebase/ai/dist/src/index.d.ts generated vendored Normal file
View File

@@ -0,0 +1,13 @@
/**
* The Firebase AI Web SDK.
*
* @packageDocumentation
*/
import { LanguageModel } from './types/language-model';
declare global {
interface Window {
LanguageModel: LanguageModel;
}
}
export * from './api';
export * from './public-types';

7
node_modules/@firebase/ai/dist/src/index.node.d.ts generated vendored Normal file
View File

@@ -0,0 +1,7 @@
/**
* The Firebase AI Web SDK.
*
* @packageDocumentation
*/
export * from './api';
export * from './public-types';

18
node_modules/@firebase/ai/dist/src/logger.d.ts generated vendored Normal file
View File

@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Logger } from '@firebase/logger';
export declare const logger: Logger;

View File

@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content } from '../types';
export declare function validateChatHistory(history: Content[]): void;

View File

@@ -0,0 +1,77 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Content, FunctionCall, FunctionResponsePart, GenerateContentRequest, GenerateContentResponse, GenerateContentResult, GenerateContentStreamResult, Part, RequestOptions, SingleRequestOptions, StartChatParams } from '../types';
import { ApiSettings } from '../types/internal';
import { ChromeAdapter } from '../types/chrome-adapter';
/**
* ChatSession class that enables sending chat messages and stores
* history of sent and received messages so far.
*
* @public
*/
export declare class ChatSession {
model: string;
private chromeAdapter?;
params?: StartChatParams | undefined;
requestOptions?: RequestOptions | undefined;
private _apiSettings;
private _history;
/**
* Ensures sequential execution of chat messages to maintain history order.
* Each call waits for the previous one to settle before proceeding.
*/
private _sendPromise;
constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
/**
* Gets the chat history so far. Blocked prompts are not added to history.
* Neither blocked candidates nor the prompts that generated them are added
* to history.
*/
getHistory(): Promise<Content[]>;
/**
* Format Content into a request for generateContent or
* generateContentStream.
* @internal
*/
_formatRequest(incomingContent: Content, tempHistory: Content[]): GenerateContentRequest;
/**
* Sends a chat message and receives a non-streaming
* {@link GenerateContentResult}
*/
sendMessage(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
/**
* Sends a chat message and receives the response as a
* {@link GenerateContentStreamResult} containing an iterable stream
* and a response promise.
*/
sendMessageStream(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
/**
* Get function calls that the SDK has references to actually call.
* This is all-or-nothing. If the model is requesting multiple
* function calls, all of them must have references in order for
* automatic function calling to work.
*
* @internal
*/
_getCallableFunctionCalls(response?: GenerateContentResponse): FunctionCall[] | undefined;
/**
* Call user-defined functions if requested by the model, and return
* the response that should be sent to the model.
* @internal
*/
_callFunctionsAsNeeded(functionCalls: FunctionCall[]): Promise<FunctionResponsePart[]>;
}

View File

@@ -0,0 +1,124 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CountTokensRequest, GenerateContentRequest, InferenceMode, OnDeviceParams } from '../types';
import { ChromeAdapter } from '../types/chrome-adapter';
import { LanguageModel } from '../types/language-model';
/**
* Defines an inference "backend" that uses Chrome's on-device model,
* and encapsulates logic for detecting when on-device inference is
* possible.
*/
export declare class ChromeAdapterImpl implements ChromeAdapter {
languageModelProvider: LanguageModel;
mode: InferenceMode;
static SUPPORTED_MIME_TYPES: string[];
private isDownloading;
private downloadPromise;
private oldSession;
onDeviceParams: OnDeviceParams;
constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
/**
* Checks if a given request can be made on-device.
*
* Encapsulates a few concerns:
* the mode
* API existence
* prompt formatting
* model availability, including triggering download if necessary
*
*
* Pros: callers needn't be concerned with details of on-device availability.</p>
* Cons: this method spans a few concerns and splits request validation from usage.
* If instance variables weren't already part of the API, we could consider a better
* separation of concerns.
*/
isAvailable(request: GenerateContentRequest): Promise<boolean>;
/**
* Generates content on device.
*
* @remarks
* This is comparable to {@link GenerativeModel.generateContent} for generating content in
* Cloud.
* @param request - a standard Firebase AI {@link GenerateContentRequest}
* @returns {@link Response}, so we can reuse common response formatting.
*/
generateContent(request: GenerateContentRequest): Promise<Response>;
/**
* Generates content stream on device.
*
* @remarks
* This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
* Cloud.
* @param request - a standard Firebase AI {@link GenerateContentRequest}
* @returns {@link Response}, so we can reuse common response formatting.
*/
generateContentStream(request: GenerateContentRequest): Promise<Response>;
countTokens(_request: CountTokensRequest): Promise<Response>;
/**
* Asserts inference for the given request can be performed by an on-device model.
*/
private static isOnDeviceRequest;
/**
* Encapsulates logic to get availability and download a model if one is downloadable.
*/
private downloadIfAvailable;
/**
* Triggers out-of-band download of an on-device model.
*
* Chrome only downloads models as needed. Chrome knows a model is needed when code calls
* LanguageModel.create.
*
* Since Chrome manages the download, the SDK can only avoid redundant download requests by
* tracking if a download has previously been requested.
*/
private download;
/**
* Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
*/
private static toLanguageModelMessage;
/**
* Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
*/
private static toLanguageModelMessageContent;
/**
* Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
*/
private static toLanguageModelMessageRole;
/**
* Abstracts Chrome session creation.
*
* Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
* inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
* inference.
*
* Chrome will remove a model from memory if it's no longer in use, so this method ensures a
* new session is created before an old session is destroyed.
*/
private createSession;
/**
* Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
*/
private static toResponse;
/**
* Formats string stream returned by Chrome as SSE returned by Firebase AI.
*/
private static toStreamResponse;
}
/**
* Creates a ChromeAdapterImpl on demand.
*/
export declare function chromeAdapterFactory(mode: InferenceMode, window?: Window, params?: OnDeviceParams): ChromeAdapterImpl | undefined;

View File

@@ -0,0 +1,21 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { CountTokensRequest, CountTokensResponse, SingleRequestOptions, RequestOptions } from '../types';
import { ApiSettings } from '../types/internal';
import { ChromeAdapter } from '../types/chrome-adapter';
export declare function countTokensOnCloud(apiSettings: ApiSettings, model: string, params: CountTokensRequest, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
export declare function countTokens(apiSettings: ApiSettings, model: string, params: CountTokensRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<CountTokensResponse>;

View File

@@ -0,0 +1,25 @@
/**
* @license
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { GenerateContentRequest, GenerateContentResponse, GenerateContentResult, GenerateContentStreamResult, SingleRequestOptions } from '../types';
import { ApiSettings } from '../types/internal';
import { ChromeAdapter } from '../types/chrome-adapter';
export declare function generateContentStream(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult & {
firstValue?: GenerateContentResponse;
}>;
export declare function templateGenerateContent(apiSettings: ApiSettings, templateId: string, templateParams: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
export declare function templateGenerateContentStream(apiSettings: ApiSettings, templateId: string, templateParams: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
export declare function generateContent(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;

View File

@@ -0,0 +1,154 @@
/**
* @license
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { FunctionCall, FunctionResponse } from '../types';
import { LiveSession } from './live-session';
/**
* A controller for managing an active audio conversation.
*
* @beta
*/
export interface AudioConversationController {
/**
* Stops the audio conversation, closes the microphone connection, and
* cleans up resources. Returns a promise that resolves when cleanup is complete.
*/
stop: () => Promise<void>;
}
/**
* Options for {@link startAudioConversation}.
*
* @beta
*/
export interface StartAudioConversationOptions {
/**
* An async handler that is called when the model requests a function to be executed.
* The handler should perform the function call and return the result as a `Part`,
* which will then be sent back to the model.
*/
functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>;
}
/**
* Dependencies needed by the {@link AudioConversationRunner}.
*
* @internal
*/
interface RunnerDependencies {
audioContext: AudioContext;
mediaStream: MediaStream;
sourceNode: MediaStreamAudioSourceNode;
workletNode: AudioWorkletNode;
}
/**
* Encapsulates the core logic of an audio conversation.
*
* @internal
*/
export declare class AudioConversationRunner {
private readonly liveSession;
private readonly options;
private readonly deps;
/** A flag to indicate if the conversation has been stopped. */
private isStopped;
/** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */
private readonly stopDeferred;
/** A promise that tracks the lifecycle of the main `runReceiveLoop`. */
private readonly receiveLoopPromise;
/** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */
private readonly playbackQueue;
/** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */
private scheduledSources;
/** A high-precision timeline pointer for scheduling gapless audio playback. */
private nextStartTime;
/** A mutex to prevent the playback processing loop from running multiple times concurrently. */
private isPlaybackLoopRunning;
constructor(liveSession: LiveSession, options: StartAudioConversationOptions, deps: RunnerDependencies);
/**
* Stops the conversation and unblocks the main receive loop.
*/
stop(): Promise<void>;
/**
* Cleans up all audio resources (nodes, stream tracks, context) and marks the
* session as no longer in a conversation.
*/
private cleanup;
/**
* Adds audio data to the queue and ensures the playback loop is running.
*/
private enqueueAndPlay;
/**
* Stops all current and pending audio playback and clears the queue. This is
* called when the server indicates the model's speech was interrupted with
* `LiveServerContent.modelTurn.interrupted`.
*/
private interruptPlayback;
/**
* Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.
*/
private processPlaybackQueue;
/**
* The main loop that listens for and processes messages from the server.
*/
private runReceiveLoop;
}
/**
* Starts a real-time, bidirectional audio conversation with the model. This helper function manages
* the complexities of microphone access, audio recording, playback, and interruptions.
*
* @remarks Important: This function must be called in response to a user gesture
* (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
*
* @example
* ```javascript
* const liveSession = await model.connect();
* let conversationController;
*
* // This function must be called from within a click handler.
* async function startConversation() {
* try {
* conversationController = await startAudioConversation(liveSession);
* } catch (e) {
* // Handle AI-specific errors
* if (e instanceof AIError) {
* console.error("AI Error:", e.message);
* }
* // Handle microphone permission and hardware errors
* else if (e instanceof DOMException) {
* console.error("Microphone Error:", e.message);
* }
* // Handle other unexpected errors
* else {
* console.error("An unexpected error occurred:", e);
* }
* }
* }
*
* // Later, to stop the conversation:
* // if (conversationController) {
* // await conversationController.stop();
* // }
* ```
*
* @param liveSession - An active {@link LiveSession} instance.
* @param options - Configuration options for the audio conversation.
* @returns A `Promise` that resolves with an {@link AudioConversationController}.
* @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
* @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
*
* @beta
*/
export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>;
export {};

Some files were not shown because too many files have changed in this diff Show More