<?php

namespace App\Helpers\FirstParty\OpenAI;

use Exception;
use Illuminate\Support\Facades\Http;
use Illuminate\Support\Facades\Log;

class OpenAI
{
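    /**
     * Build the site-summary system prompt and request a JSON chat completion
     * for the given user prompt (the website content to summarise).
     */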
    public static function getSiteSummary($parent_categories, $user_prompt, $model_max_tokens = 1536, $timeout = 60)
    {
        $openai_config = 'openai-gpt-3-5-turbo-1106';

        // Pipe-separated list of parent category names (not currently referenced by the prompt below).
        $category_list = implode('|', $parent_categories->pluck('name')->toArray());

        $system_prompt = "Based on the website content containing an AI tool, return a valid JSON containing:\n{\n\"is_ai_tool\":(true|false),\n\"ai_tool_name\":\"(AI Tool Name)\",\n\"is_app_web_both\":\"(app|web|both)\",\n\"tagline\":\"(One line tagline in 6-8 words)\",\n\"summary\": \"(Summary of AI tool in 2-3 paragraphs, 200-240 words using grade 8 US English, start with AI tool name)\",\n\"pricing_type\": \"(Free|Free Trial|Freemium|Subscription|Usage Based)\",\n\"main_category\": \"(AI Training|Art|Audio|Avatars|Business|Chatbots|Coaching|Content Generation|Data|Dating|Design|Dev|Education|Emailing|Finance|Gaming|GPTs|Health|Legal|Marketing|Music|Networking|Personal Assistance|Planning|Podcasting|Productivity|Project Management|Prompting|Reporting|Research|Sales|Security|SEO|Shopping|Simulation|Social|Speech|Support|Task|Testing|Training|Translation|UI\/UX|Video|Workflow|Writing)\",\n\"keywords\":[\"(Identify relevant keywords for this AI Tool, 1-2 words each, at least)\"],\n\"qna\":[{\"q\":\"Typical FAQ that readers want to know, up to 5 questions\",\"a\":\"Answer of the question\"}]\n}";

        return self::getChatCompletion($user_prompt, $system_prompt, $openai_config, $model_max_tokens, $timeout);
    }
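    /**
     * Run a chat completion using the model and per-1K-token prices stored
     * under the given config key, then return an object with the prompts,
     * dollar cost, decoded output and token usage. Falls back to the default
     * failed response when the request or JSON decoding throws.
     */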
    private static function getChatCompletion($user_prompt, $system_prompt, $openai_config, $model_max_tokens, $timeout, $response_format = 'json_object')
    {
        $model = config("platform.ai.{$openai_config}.model");
        $input_cost_per_thousand_tokens = config("platform.ai.{$openai_config}.input_cost_per_thousand_tokens");
        $output_cost_per_thousand_tokens = config("platform.ai.{$openai_config}.output_cost_per_thousand_tokens");

        // Cap the completion length at the caller-supplied token limit.
        $output_token = $model_max_tokens;

        try {
            $obj = self::chatCompletionApi($system_prompt, $user_prompt, $model, $output_token, $response_format, $timeout);

            $input_cost = self::getCostUsage($obj->usage_detailed->prompt_tokens, $input_cost_per_thousand_tokens);
            $output_cost = self::getCostUsage($obj->usage_detailed->completion_tokens, $output_cost_per_thousand_tokens);

            $output = $obj->reply;

            if ($response_format === 'json_object') {
                $output = json_decode(self::jsonFixer($obj->reply), false, 512, JSON_THROW_ON_ERROR);
            }

            return (object) [
                'prompts' => (object) [
                    'system_prompt' => $system_prompt,
                    'user_prompt' => $user_prompt,
                ],
                'cost' => $input_cost + $output_cost,
                'output' => $output,
                'token_usage' => $obj->usage,
                'token_usage_detailed' => $obj->usage_detailed,
            ];
        } catch (Exception $e) {
            return self::getDefaultFailedResponse($system_prompt, $user_prompt, $e);
        }
    }
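    /**
     * Build the zero-cost fallback object returned when a completion fails,
     * optionally carrying the exception message.
     */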
    private static function getDefaultFailedResponse($system_prompt, $user_prompt, $exception = null)
    {
        $exception_message = null;

        if (! is_null($exception)) {
            $exception_message = $exception->getMessage();
        }

        return (object) [
            'exception' => $exception_message,
            'prompts' => (object) [
                'system_prompt' => $system_prompt,
                'user_prompt' => $user_prompt,
            ],
            'cost' => 0,
            'output' => null,
            'token_usage' => 0,
            'token_usage_detailed' => (object) [
                'completion_tokens' => 0,
                'prompt_tokens' => 0,
                'total_tokens' => 0,
            ],
        ];
    }
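    /**
     * Convert a token count into a dollar cost given a price per 1,000 tokens,
     * e.g. 1,234 tokens at $0.0010 per 1K tokens costs $0.001234.
     */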
    private static function getCostUsage($token_usage, $cost_per_thousand_tokens)
    {
        $calc = $token_usage / 1000;

        return $calc * $cost_per_thousand_tokens;
    }
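    /**
     * Strip raw newlines from a model reply before json_decode(), since
     * unescaped newline characters inside string values are not valid JSON.
     */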
    private static function jsonFixer($json_string)
    {
        $json_string = str_replace("\n", '', $json_string);

        // try {
        //     return (new JsonFixer)->fix($json_string);
        // } catch (Exception $e) {
        // }

        return $json_string;
    }
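    /**
     * Post the prompt pair to the OpenAI chat completions endpoint and return
     * the reply together with its token-usage figures. Throws an Exception
     * when the API response contains an error.
     */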
    public static function chatCompletionApi($system_prompt, $user_prompt, $model, $max_token = 2500, $response_format = 'text', $timeout = 800)
    {
        $arr = [
            'model' => $model,
            'max_tokens' => $max_token,
            'messages' => [
                ['role' => 'system', 'content' => $system_prompt],
                ['role' => 'user', 'content' => $user_prompt],
            ],
        ];

        if ($response_format === 'json_object') {
            $arr['response_format'] = (object) [
                'type' => 'json_object',
            ];
        }

        try {
            $response = Http::timeout($timeout)->withToken(config('platform.ai.openai.api_key'))
                ->post('https://api.openai.com/v1/chat/completions', $arr);

            $json_response = json_decode($response->body());

            if (isset($json_response->error)) {
                Log::error(serialize($json_response));

                throw new Exception(serialize($json_response->error));
            }

            return (object) [
                'usage' => $json_response?->usage?->total_tokens,
                'usage_detailed' => $json_response?->usage,
                'reply' => $json_response?->choices[0]?->message?->content,
            ];
        } catch (Exception $e) {
            // inspector()->reportException($e);
            throw $e;
        }
    }
}
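
/*
 * Example usage (illustrative sketch, not part of the class): assumes
 * $parentCategories is a collection of category models with a `name`
 * attribute and $pageText holds the scraped website content.
 *
 *     $result = OpenAI::getSiteSummary($parentCategories, $pageText);
 *
 *     if ($result->output !== null) {
 *         echo $result->output->tagline; // one-line tagline generated by the model
 *         echo $result->cost;            // estimated dollar cost of the call
 *     }
 */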