dev: add Gemini support (non-streaming)

This commit is contained in:
KernelDeimos
2025-02-11 14:57:17 -05:00
parent 74319b44af
commit b74ec1f69c
5 changed files with 116 additions and 1 deletion

11
package-lock.json generated
View File

@@ -2327,6 +2327,14 @@
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
}
},
"node_modules/@google/generative-ai": {
"version": "0.21.0",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.21.0.tgz",
"integrity": "sha512-7XhUbtnlkSEZK15kN3t+tzIMxsbKm/dSkKBFalj+20NvPKe1kBY7mR2P7vuijEn+f06z5+A8bVGKO0v39cr6Wg==",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@grpc/grpc-js": {
"version": "1.10.10",
"resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.10.10.tgz",
@@ -17708,6 +17716,7 @@
"@anthropic-ai/sdk": "^0.26.1",
"@aws-sdk/client-polly": "^3.622.0",
"@aws-sdk/client-textract": "^3.621.0",
"@google/generative-ai": "^0.21.0",
"@heyputer/kv.js": "^0.1.9",
"@heyputer/multest": "^0.0.2",
"@heyputer/putility": "^1.0.0",
@@ -17992,7 +18001,7 @@
},
"src/putility": {
"name": "@heyputer/putility",
"version": "1.0.0",
"version": "1.0.2",
"license": "AGPL-3.0-only"
},
"src/strataparse": {

View File

@@ -10,6 +10,7 @@
"@anthropic-ai/sdk": "^0.26.1",
"@aws-sdk/client-polly": "^3.622.0",
"@aws-sdk/client-textract": "^3.621.0",
"@google/generative-ai": "^0.21.0",
"@heyputer/kv.js": "^0.1.9",
"@heyputer/multest": "^0.0.2",
"@heyputer/putility": "^1.0.0",

View File

@@ -0,0 +1,80 @@
const BaseService = require("../../services/BaseService");
const { GoogleGenerativeAI } = require('@google/generative-ai');
const GeminiSquareHole = require("./lib/GeminiSquareHole");
/**
 * GeminiService provides chat completion via Google's Gemini API.
 * Non-streaming only for now: the `stream` and `tools` arguments of
 * `complete` are accepted for interface compatibility but ignored.
 */
class GeminiService extends BaseService {
    /**
     * Registers this service as a provider with the central 'ai-chat'
     * service so it can be selected for chat completions.
     */
    async _init () {
        const svc_aiChat = this.services.get('ai-chat');
        svc_aiChat.register_provider({
            service_name: this.service_name,
            alias: true,
        });
    }

    static IMPLEMENTS = {
        ['puter-chat-completion']: {
            /**
             * @returns {Promise<Object[]>} model descriptors
             *   (id, name, context window, cost)
             */
            async models () {
                return await this.models_();
            },
            /**
             * @returns {Promise<string[]>} flat list of model ids,
             *   including any aliases
             */
            async list () {
                const models = await this.models_();
                const model_names = [];
                for ( const model of models ) {
                    model_names.push(model.id);
                    if ( model.aliases ) {
                        model_names.push(...model.aliases);
                    }
                }
                return model_names;
            },
            /**
             * Performs a chat completion against the Gemini API.
             *
             * @param {Object} params
             * @param {Object[]} params.messages - Puter-format messages;
             *   converted to Gemini format by GeminiSquareHole.
             * @param {boolean} params.stream - ignored (non-streaming only)
             * @param {string} [params.model] - Gemini model id;
             *   defaults to 'gemini-2.0-flash'
             * @param {Object[]} params.tools - ignored (not yet supported)
             * @returns {Promise<{message: Object}>} completion result with
             *   `message.content` set to the candidate's parts array and
             *   `message.role` normalized to 'assistant'
             */
            async complete ({ messages, stream, model, tools }) {
                const genAI = new GoogleGenerativeAI(this.config.apiKey);
                const genModel = genAI.getGenerativeModel({
                    model: model ?? 'gemini-2.0-flash',
                });

                messages = await GeminiSquareHole.process_input_messages(messages);

                // History is separate, so the last message gets special treatment.
                const last_message = messages.pop();
                // Gemini's sendMessage accepts strings or Part objects;
                // flatten Part objects to their text here.
                const last_message_parts = last_message.parts.map(
                    part => typeof part === 'string' ? part : part.text
                );

                const chat = genModel.startChat({
                    history: messages,
                });

                const genResult = await chat.sendMessage(last_message_parts);

                // Take the first candidate and reshape it into Puter's
                // expected message format.
                const message = genResult.response.candidates[0];
                message.content = message.content.parts;
                message.role = 'assistant';

                return { message };
            }
        }
    }

    /**
     * @returns {Promise<Object[]>} static model catalog; costs are in
     *   USD cents per `tokens` tokens.
     */
    async models_ () {
        return [
            {
                id: 'gemini-1.5-flash',
                name: 'Gemini 1.5 Flash',
                context: 131072,
                cost: {
                    currency: 'usd-cents',
                    tokens: 1_000_000,
                    input: 7.5,
                    output: 30,
                },
            },
        ];
    }
}

module.exports = { GeminiService };

View File

@@ -99,6 +99,10 @@ class PuterAIModule extends AdvancedBase {
// const { ClaudeEnoughService } = require('./ClaudeEnoughService');
// services.registerService('claude', ClaudeEnoughService);
}
if ( !! config?.services?.['gemini'] ) {
const { GeminiService } = require('./GeminiService');
services.registerService('gemini', GeminiService);
}
const { AIChatService } = require('./AIChatService');
services.registerService('ai-chat', AIChatService);

View File

@@ -0,0 +1,21 @@
/**
* Technically this should be called "GeminiUtil",
* but Google's AI API defies all the established conventions
* so it made sense to defy them here as well.
*/
module.exports = class GeminiSquareHole {
static process_input_messages = async (messages) => {
messages = messages.slice();
for ( const msg of messages ) {
msg.parts = msg.content;
delete msg.content;
if ( msg.role === 'assistant' ) {
msg.role = 'model';
}
}
return messages;
}
}