Unverified Commit 8d0d8c78 authored by jinyuan sun's avatar jinyuan sun Committed by GitHub
Browse files

Merge pull request #42 from ChatMol/get_ride_of_openai

Beta version of ChatMol Pymol plugin v 2.1
parents 0685660e cb9912d7
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -142,3 +142,4 @@ cf08d4763cf8b0309f2aa182c253388d712a1b736d4eb34f226c74588995faa0*
e9bbca7f41ac6c03b3a6c3193115e8ac8a4c4fd572a4230577a81c432b2cacfe*
06377e8a560dca176f9a7d7ac9e3184c6c67e13aa1e9a6a3972e2fde8952375b*
copilot_public/Project-*
chatmol_claude_test.py
 No newline at end of file
+11 −4
Original line number Diff line number Diff line
@@ -6,13 +6,14 @@ st.sidebar.markdown("Welcome to ChatMol! ChatMol is a tool that allows you to in

openai_llms = ['gpt-4o', 'gpt-4-turbo', 'gpt-3.5-turbo']
claude_llms = ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-haiku-20240307', 'claude-3-opus-20240229']
chatmol_llms = ['chatlite']
chatmol_llms = ['chatlite', "chatmol-llm-0.1"]

introduction_of_models = {
    'gpt-4o': "GPT-4o (“o” for “omni”) is most advanced model of OpenAI. It has the same high intelligence as GPT-4 Turbo but is much more efficient—it generates text 2x faster and is 50% cheaper.",
    'gpt-4-turbo': "GPT-4 can solve difficult problems with greater accuracy than any of previous models of OpenAI, thanks to its broader general knowledge and advanced reasoning capabilities.",
    'gpt-3.5-turbo': "GPT-3.5 Turbo models can understand and generate natural language or code and have been optimized for chat.",
    'chatlite': "A model provided by ChatMol freely available to all, which is optimized for PyMOL commands generation but not good for general chat.",
    'chatlite': "A service provided by ChatMol freely available to all, which is optimized for PyMOL commands generation but not good for general chat.",
    'chatmol-llm-0.1': "A model provided by ChatMol freely available to all, which is optimized for PyMOL commands generation but still can be used for general chat.",
    'claude-3-5-sonnet-20240620': "Most intelligent model of Anthropic, combining top-tier performance with improved speed. Currently the only model in the Claude 3.5 family.\n - Advanced research and analysis\n - Complex problem-solving\n - Sophisticated language understanding and generation\n - High-level strategic planning",
    'claude-3-sonnet-20240229': "Balances intelligence and speed for high-throughput tasks.\n - Data processing over vast amounts of knowledge\n - Sales forecasting and targeted marketing\n - Code generation and quality control",
    'claude-3-haiku-20240307': "Near-instant responsiveness that can mimic human interactions.\n - Live support chat\n - Translations\n - Content moderation\n - Extracting knowledge from unstructured data",
@@ -69,14 +70,20 @@ if prompt := st.chat_input("What is up?"):
            elif st.session_state["llm"] in claude_llms:
                response = st.session_state["cm"].chat_with_claude(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my question: \n\n{prompt}")
            elif st.session_state["llm"] in chatmol_llms:
                response = st.session_state["cm"].chatlite(f"Instruction: {prompt}")
                if st.session_state["llm"] == "chatlite":
                    response = st.session_state["ps"].chatlite(f"Instruction: {prompt}")
                else:
                    response = st.session_state["ps"].chatmol(f"{prompt}")
        else:
            if st.session_state["llm"] in openai_llms:
                response = st.session_state["ps"].chatgpt(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my instruction: \n\n{prompt}")
            elif st.session_state["llm"] in claude_llms:
                response = st.session_state["ps"].claude(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my instruction: \n\n{prompt}")
            elif st.session_state["llm"] in chatmol_llms:
                if st.session_state["llm"] == "chatlite":
                    response = st.session_state["ps"].chatlite(f"Instruction: {prompt}")
                else:
                    response = st.session_state["ps"].chatmol(f"{prompt}")
    
        st.session_state.messages.append({"role": "assistant", "content": response})
    with st.chat_message("assistant"):
+1 −1
Original line number Diff line number Diff line
@@ -65,7 +65,7 @@ def start_server():
def init_server():
    """Run ``start_server`` in a background thread and announce startup.

    The thread is intentionally not joined so the caller (e.g. the PyMOL
    plugin entry point) is not blocked while the server runs.
    """
    worker = threading.Thread(target=start_server)
    worker.start()
    print("Server started")

conversation_history = ""
+6 −1
Original line number Diff line number Diff line
@@ -12,6 +12,9 @@ def chat_with_gpt(message):
def chat_with_claude(message):
    """Forward *message* to the module-level default client's Claude chat."""
    reply = defaul_client.chat_with_claude(message)
    return reply

def chat_with_chatmol_llm(message):
    """Forward *message* to the module-level default client's ChatMol LLM."""
    reply = defaul_client.chat_with_chatmol_llm(message)
    return reply

def clear_stashed_commands():
    """Drop any PyMOL commands stashed on the module-level default client."""
    result = defaul_client.clear_stashed_commands()
    return result

@@ -25,3 +28,5 @@ def start_pymol_gui():

def warnings():
    """Return the default client's ``warnings`` attribute (not a call).

    NOTE(review): this function shadows the stdlib ``warnings`` module when
    star-imported — consider renaming in a future API revision.
    """
    pending = defaul_client.warnings
    return pending

__version__ = "0.3.0"
 No newline at end of file
+44 −1
Original line number Diff line number Diff line
@@ -151,3 +151,46 @@ class PymolServer():
        except Exception as e:
            print(f"Error during command execution: {e}")
        return response
    
    def chatmol(self, message, execute:bool=True):
        message = message.strip()
        if message == "e" or message == 'execute':
            if len(self.cm.stashed_commands) == 0:
                print("There is no stashed commands")
            else:
                for command in self.cm.stashed_commands:
                    self.server.do(command)
                self.cm.clear_stashed_commands()
            return 0
        
        if message == "new":
            self.cm.clear_chat_history()
            self.cm.clear_stashed_commands()
            return 0
        
        if message.endswith('?'):
            execute = False
        
        response = self.cm.chat_with_chatmol_llm(message)  # Using the chat_with_gpt method
        print("ChatMol:", response)
        try:
            command_blocks = []
            self.cm.clear_stashed_commands()
            for i, block in enumerate(response.split("```")):
                if i % 2 == 1:
                    command_blocks.append(block)
            for command_block in command_blocks:
                for command in command_block.split("\n"):
                    if command.strip() and not command.strip().startswith("#"):
                        if command.strip() == "pymol" or command.strip() == "bash":
                            continue  # Skipping python commands
                        if "#" in command:
                            command, comment = command.split("#")
                        if execute:
                            print(command)
                            self.server.do(command)
                        else:
                            self.cm.stashed_commands.append(command)
        except Exception as e:
            print(f"Error during command execution: {e}")
        return response
Loading