TIL 240523


Implementing the Summary

Building on the previously implemented synopsis, generate the novel's summary, produce three choices that decide the direction the story takes next, and hand the data off to the frontend.

import os
import re
import logging

from langchain_openai import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder


def summary_generator(summary):
    llm = ChatOpenAI(model='gpt-3.5-turbo', api_key=os.getenv('OPENAI_API_KEY'))

    # Recent turns are kept verbatim; once they exceed max_token_limit,
    # older turns are condensed into a running summary by the LLM.
    memory = ConversationSummaryBufferMemory(
        llm=llm, max_token_limit=20000, memory_key='chat_history', return_messages=True)
    
    summary_template = ChatPromptTemplate.from_messages([
        ("system", "You are an experienced novelist. Write a concise, realistic, and engaging summary based on the provided theme and previous context. Develop the characters, setting, and plot with rich descriptions. Ensure the summary flows smoothly, highlighting both hope and despair. Make the narrative provocative and creative. Avoid explicit reader interaction prompts or suggested paths."),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{prompt}")
    ])

    recommend_template = ChatPromptTemplate.from_messages([
        ("system", "Based on the current summary prompt, provide three compelling recommendations for the next part of the summary. Your recommendations should emphasize hopeful, tragically hopeless, and starkly realistic choices, respectively. Be extremely contextual and realistic with your recommendations. Each recommendation should have 'Title': 'Description'. For example: 'Title': 'The Beginning of a Tragedy','Description': 'The people are kind to the new doctor in town, but under the guise of healing their wounds, the doctor slowly conducts experiments.' The response format is exactly the same as the frames in the example."),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{current_story}")
    ])
    
    def load_memory():
        # Return the running chat history (recent turns plus any rolling summary).
        return memory.load_memory_variables({})["chat_history"]
    
    def parse_recommendations(recommendation_text):
        # Collect up to three {"Title": ..., "Description": ...} pairs from lines
        # that start with "Title:" / "Description:" in the model's response.
        recommendations = []
        try:
            rec_lines = recommendation_text.split('\n')
            title, description = None, None
            for line in rec_lines:
                if line.startswith("Title:"):
                    if title and description:
                        recommendations.append(
                            {"Title": title, "Description": description})
                    title = line.split("Title:", 1)[1].strip()
                    description = None
                elif line.startswith("Description:"):
                    description = line.split("Description:", 1)[1].strip()
                    if title and description:
                        recommendations.append(
                            {"Title": title, "Description": description})
                        title, description = None, None
                if len(recommendations) == 3:
                    break
        except Exception as e:
            logging.error(f"Error parsing recommendations: {e}")

        return recommendations
    
    def generate_recommendations(chat_history, current_story):
        # Ask the model for three next-direction choices based on the story so far.
        # format_messages keeps the system/human role structure for the chat model.
        formatted_recommendation_prompt = recommend_template.format_messages(
            chat_history=chat_history, current_story=current_story)
        recommendation_result = llm.invoke(formatted_recommendation_prompt)
        recommendations = parse_recommendations(recommendation_result.content)
        return recommendations

    def remove_recommendation_paths(final_summary):
        # Strip any trailing "Recommended summary paths: ..." text the model
        # may append despite the system prompt's instructions.
        pattern = re.compile(r'Recommended summary paths:.*$', re.DOTALL)
        cleaned_story = re.sub(pattern, '', final_summary).strip()
        return cleaned_story


    chat_history = load_memory()
    prompt = f"""
    Story Prompt: {summary}
    Previous Story: {chat_history}
    Write a concise, realistic, and engaging summary based on the above information. Highlight both hope and despair in the narrative. Make it provocative and creative.
    """

    formatted_final_prompt = summary_template.format_messages(
        chat_history=chat_history, prompt=prompt)
    result = llm.invoke(formatted_final_prompt)
    # Save this turn so the next summary can continue the same storyline.
    memory.save_context({"input": prompt}, {"output": result.content})

    cleaned_story = remove_recommendation_paths(result.content)
    recommendations = generate_recommendations(chat_history, result.content)

    return {"final_summary": cleaned_story, "recommendations": recommendations}

The memory is what lets each new summary continue the story so far. With ConversationSummaryBufferMemory, results are stored verbatim up to a set token limit; once more than that accumulates, the older entries are summarized before being stored, so the story does not suddenly veer off into something unrelated.
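
As a standalone illustration of that behavior, the sketch below uses a deliberately tiny max_token_limit so the summarization step triggers quickly; the limit and the sample texts are assumptions for the demo only, not the values used above.

import os

from langchain_openai import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory

llm = ChatOpenAI(model='gpt-3.5-turbo', api_key=os.getenv('OPENAI_API_KEY'))

# Small limit (demo-only value) so older turns get summarized almost immediately.
demo_memory = ConversationSummaryBufferMemory(
    llm=llm, max_token_limit=100, memory_key='chat_history', return_messages=True)

demo_memory.save_context(
    {"input": "Chapter 1 prompt"}, {"output": "A long first chapter about the town..."})
demo_memory.save_context(
    {"input": "Chapter 2 prompt"}, {"output": "A long second chapter about the doctor..."})

# Recent turns come back verbatim; anything pruned past the token limit is
# returned as a single SystemMessage holding an LLM-written summary.
print(demo_memory.load_memory_variables({})["chat_history"])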
