"""Continuously exercise an OpenAI chat model so Galileo Observe has traffic to monitor.

Every 5 seconds the script picks a random question, runs it through a
prompt -> LLM -> string-parser chain, and reports the call to Galileo via
the GalileoObserveCallback. Runs forever; stop with Ctrl-C.
"""
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
# Fix: StrOutputParser was used below but never imported (NameError at runtime).
from langchain.schema.output_parser import StrOutputParser
from galileo_observe import GalileoObserveCallback
from datetime import datetime
import time
import random
import os

# NOTE(review): these right-hand sides are placeholders — replace them with
# your real console URL / API keys (as strings) before running, otherwise the
# script raises NameError on these undefined names.
os.environ["GALILEO_CONSOLE_URL"] = YOUR_GALILEO_CONSOLE_URL
os.environ["OPENAI_API_KEY"] = YOUR_OPEN_AI_KEY
os.environ["GALILEO_API_KEY"] = YOUR_GALILEO_API_KEY


class MonitoringApp:
    """Thin wrapper that runs a monitored prompt -> LLM -> parser chain."""

    def run_llm(self, llm, user_prompt: str) -> str:
        """Ask *llm* the given question and return its answer as a string.

        The call is traced by Galileo Observe through the callback attached
        in the invoke config.

        Args:
            llm: A LangChain chat model (e.g. ChatOpenAI).
            user_prompt: The question to ask.

        Returns:
            The model's answer. (Fix: the original computed the result but
            never returned it.)
        """
        prompt = PromptTemplate.from_template(
            "Answer the following question: {user_prompt}"
        )
        chain = prompt | llm | StrOutputParser()
        result = chain.invoke(
            {"user_prompt": user_prompt},
            config={
                "callbacks": [GalileoObserveCallback(project_name='first-project')]
            },
        )
        return result


app = MonitoringApp()
# temperature=0 keeps the answers deterministic for easier monitoring.
llm = ChatOpenAI(temperature=0)

questions = [
    "Why is the sky blue?",
    "How do magnets work?",
    "Why do apples fall from trees?",
    "What is gravity?",
    "How does the moon affect tides?",
    "Why is the ocean salty?",
    "What causes thunder?",
    "How do plants make food?",
    "Why do we have seasons?",
    "How do rainbows form?",
]

# Fire a random question every 5 seconds, forever.
while True:
    question = random.choice(questions)
    print(f"Querying the LLM: {question}")
    app.run_llm(llm, question)
    time.sleep(5)