Update (openai): Fix token issues

2023-09-25 21:12:29 +08:00
parent 5d28d31df6
commit 91a1d7d671

@@ -7,6 +7,8 @@
+use Illuminate\Support\Facades\Log;
+use Illuminate\Support\Str;
 class OpenAI
 {
@@ -60,9 +62,12 @@ public static function createNewArticleTitle($current_title, $supporting_data)
 return in following json format {\"main_keyword\":\"(Main Keyword)\",\"title\":\"(Title in 90-130 letters)\",\"short_title\":\"(Short Title in 30-40 letters)\",\"article_type\":\"(How-tos|Guides|Interview|Review|Commentary|Feature|News|Editorial|Report|Research|Case-study|Overview|Tutorial|Update|Spotlight|Insights)\",\"description\":\"(Cliffhanger SEO description based on main keyword, do not start with action verb)\",\"photo_keywords\":[\"photo keyword 1\",\"photo keyword 2\"]}";
+        $supporting_data = Str::substr($supporting_data,0, 2100);
         $user_prompt = "Article Title: {$current_title}\n Article Description: {$supporting_data}\n";
-        $reply = self::chatCompletion($system_prompt, $user_prompt, 'gpt-3.5-turbo');
+        $reply = self::chatCompletion($system_prompt, $user_prompt, 'gpt-3.5-turbo', 900);
         try {
             return json_decode($reply, false);
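
Note on the call-site change above (an illustration, not part of the diff): the new fourth argument caps the completion size for this one caller at 900 tokens, while the method keeps a 2500-token default (see the next hunk), so other callers are unaffected. For example:

// Hypothetical call sites, assuming the OpenAI class from this diff is imported.
$short = OpenAI::chatCompletion($system_prompt, $user_prompt, 'gpt-3.5-turbo', 900); // explicit 900-token cap
$long  = OpenAI::chatCompletion($system_prompt, $user_prompt, 'gpt-3.5-turbo');      // falls back to the 2500 default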
@@ -95,13 +100,13 @@ public static function suggestArticleTitles($current_title, $supporting_data, $s
     }
-    public static function chatCompletion($system_prompt, $user_prompt, $model)
+    public static function chatCompletion($system_prompt, $user_prompt, $model, $max_token = 2500)
     {
         try {
             $response = Http::timeout(800)->withToken(config('platform.ai.openai.api_key'))
                 ->post('https://api.openai.com/v1/chat/completions', [
                     'model' => $model,
-                    'max_tokens' => 2500,
+                    'max_tokens' => $max_token,
                     'messages' => [
                         ['role' => 'system', 'content' => $system_prompt],
                         ['role' => 'user', 'content' => $user_prompt],
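
For orientation, here is a minimal sketch of how the updated chatCompletion helper reads once this change is applied, assembled from the hunk above. The lines after the messages array are not visible in this diff, so the response extraction and the catch block below are assumptions about plausible handling, not the file's actual code.

public static function chatCompletion($system_prompt, $user_prompt, $model, $max_token = 2500)
{
    try {
        $response = Http::timeout(800)->withToken(config('platform.ai.openai.api_key'))
            ->post('https://api.openai.com/v1/chat/completions', [
                'model' => $model,
                // The completion cap is now caller-controlled instead of hard-coded.
                'max_tokens' => $max_token,
                'messages' => [
                    ['role' => 'system', 'content' => $system_prompt],
                    ['role' => 'user', 'content' => $user_prompt],
                ],
            ]);

        // Assumed: the Chat Completions API returns the reply text in choices[0].message.content.
        return $response->json('choices.0.message.content');
    } catch (\Exception $e) {
        // Assumed error handling; the diff adds a Log import, so logging failures here is plausible.
        Log::error('OpenAI chatCompletion failed: ' . $e->getMessage());

        return null;
    }
}

The default of 2500 matches the value that was previously hard-coded, so callers that do not pass a fourth argument behave exactly as before.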