diff --git a/assistant.go b/assistant.go index 0b218e0..4b4ef18 100644 --- a/assistant.go +++ b/assistant.go @@ -1,6 +1,8 @@ package main import ( + "crypto/md5" + "encoding/base64" "strconv" "strings" "time" @@ -234,6 +236,7 @@ func handleAssistantConversation(c tele.Context, thread []*tele.Message) error { Messages: chatReqMsgs, Temperature: lo.ToPtr(0.42), MaxTokens: 2048, + User: assistantHashUserId(lastMsg.Sender.ID), } typingNotifyCh := setTyping(c) @@ -272,3 +275,9 @@ func assistantRemoveMention(msg, name string) string { } return orig } + +func assistantHashUserId(uid int64) string { + seasoned := []byte("RdnuRPqp66vtbc28QRO0ecKSLKXifz7G9UbXLoyCMpw" + strconv.FormatInt(uid, 10)) + hashed := md5.Sum(seasoned) // Don't judge me + return base64.URLEncoding.EncodeToString(hashed[:])[:22] +} diff --git a/openai/chat.go b/openai/chat.go index ac525df..3564ed6 100644 --- a/openai/chat.go +++ b/openai/chat.go @@ -18,16 +18,16 @@ type ChatMessage struct { type ChatRequest struct { Model string `json:"model"` Messages []ChatMessage `json:"messages"` - Temperature *float64 `json:"temperature,omitempty"` - TopP *float64 `json:"top_p,omitempty"` - N int `json:"n,omitempty"` - Stream bool `json:"stream,omitempty"` - Stop []string `json:"stop,omitempty"` + Temperature *float64 `json:"temperature,omitempty"` // What sampling temperature to use, between 0 and 2. + TopP *float64 `json:"top_p,omitempty"` // Nucleus sampling. Specify this or temperature but not both. + N int `json:"n,omitempty"` // How many chat completion choices to generate for each input message. + Stream bool `json:"stream,omitempty"` // If set, partial message deltas will be sent as data-only server-sent events as they become available. + Stop []string `json:"stop,omitempty"` // Up to 4 sequences where the API will stop generating further tokens.
MaxTokens int `json:"max_tokens,omitempty"` - PresencePenalty *float64 `json:"presence_penalty,omitempty"` - FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` - LogitBias map[string]float64 `json:"logit_bias,omitempty"` - User string `json:"user,omitempty"` + PresencePenalty *float64 `json:"presence_penalty,omitempty"` // Number between -2.0 and 2.0. + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` // Number between -2.0 and 2.0. + LogitBias map[string]float64 `json:"logit_bias,omitempty"` // Modify the likelihood of specified tokens appearing in the completion. + User string `json:"user,omitempty"` // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. } type ChatResponseChoice struct {