Introduction to Model Context Protocol (MCP)
James Chapman
AI Curriculum Manager, DataCamp
→ Continue with the OpenAI Responses API





from datetime import datetime
from pathlib import Path
from zoneinfo import ZoneInfo

from mcp.server.fastmcp import FastMCP

# MCP server exposing a timezone tool, a resource, and a prompt template.
mcp = FastMCP("Timezone Converter")


@mcp.tool()
def convert_timezone(date_time: str, from_timezone: str, to_timezone: str) -> str:
    """Convert an ISO-format datetime string between IANA timezones.

    Args:
        date_time: Datetime in ISO format, e.g. "2024-01-15 09:50".
        from_timezone: Source IANA timezone name, e.g. "Europe/London".
        to_timezone: Target IANA timezone name, e.g. "Europe/Lisbon".

    Returns:
        The converted datetime as an ISO-format string.
    """
    naive = datetime.fromisoformat(date_time)
    localized = naive.replace(tzinfo=ZoneInfo(from_timezone))
    return localized.astimezone(ZoneInfo(to_timezone)).isoformat()


@mcp.resource("file://locations.txt")
def get_locations() -> str:
    """Return the list of supported locations from locations.txt."""
    return Path("locations.txt").read_text(encoding="utf-8")


@mcp.prompt(title="Timezone Conversion")
def convert_timezone_prompt(timezone_request: str) -> str:
    """Build the prompt text for a user's timezone-conversion request."""
    return (
        "You are a timezone conversion assistant. "
        "Only answer using the supported locations provided. "
        f"User request: {timezone_request}"
    )


if __name__ == "__main__":
    # stdio transport lets an MCP client spawn this server as a subprocess
    # and talk to it over stdin/stdout.
    mcp.run(transport="stdio")
# Client-session calls used below:
# - session.read_resource(resource_uri): fetches a resource's contents by URI.
# - session.get_prompt(prompt_name, arguments): fetches the prompt template
#   with the user's request injected.
async def get_context_from_mcp(user_query: str) -> tuple[str, str]:
    """Fetch resource content and prompt text from the MCP server.

    Args:
        user_query: The user's timezone request, injected into the prompt.

    Returns:
        A (resource_text, prompt_text) pair.
    """
    # Spawn the server script as a subprocess speaking MCP over stdio.
    params = StdioServerParameters(command=sys.executable, args=["timezone_server.py"])
    async with stdio_client(params) as (reader, writer):
        async with ClientSession(reader, writer) as session:
            await session.initialize()
            # Get the resource (supported locations)
            resource_result = await session.read_resource("file://locations.txt")
            resource_text = resource_result.contents[0].text
            # Get the prompt with the user's query
            prompt_result = await session.get_prompt(
                "convert_timezone_prompt",
                arguments={"timezone_request": user_query},
            )
            prompt_text = prompt_result.messages[0].content.text
            return resource_text, prompt_text
async def call_llm_with_context(user_query: str):
    """Call the LLM with resource and prompt context from MCP.

    Sends the combined prompt + resource to the Responses API; if the model
    replies with a message it is printed and returned, and if it requests a
    tool call the MCP tool is invoked and a follow-up request is made.
    """
    resource_text, prompt_text = await get_context_from_mcp(user_query)
    # Combine prompt (task + rules + user request) with resource (supported locations)
    full_prompt = prompt_text + "\n\nSupported locations:\n" + resource_text
    client = AsyncOpenAI(api_key="<OPENAI_API_TOKEN>")
    response = await client.responses.create(
        model="gpt-4o-mini",
        input=full_prompt,
        tools=openai_tools,  # from get_tools_from_mcp(), formatted for OpenAI
    )
    output = response.output[0]
    if output.type == "message":
        # Plain text answer — no tool needed.
        print(f"\nAssistant: {output.content[0].text}")
        return str(output.content[0].text)
    if output.type == "function_call":
        # Model asked for a tool: run it on the MCP server, then send the
        # result back so the model can produce its final reply.
        args = json.loads(output.arguments)
        result = await call_mcp_tool(output.name, args)
        followup = await client.responses.create(
            model="gpt-4o-mini",
            input=[
                {"role": "user", "content": user_query},
                output,
                {"type": "function_call_output", "call_id": output.call_id, "output": result},
            ],
        )
        # ... then print the assistant's reply from followup.output
if __name__ == "__main__":
    # Ambiguous request: the model should ask a clarifying question
    # rather than call the conversion tool.
    asyncio.run(call_llm_with_context("What time is it in Canada?"))
Assistant: Canada has several time zones. Which city or region do you mean?
For example, Toronto, Vancouver, or Halifax?
if __name__ == "__main__":
    # Fully-specified request: the model can call the conversion tool directly.
    asyncio.run(call_llm_with_context(
        "It is 9:50 AM in the UK in January. What time is it in Lisbon, Portugal?"
    ))
Assistant: It's 9:50 AM in Lisbon as well.

Introduction to Model Context Protocol (MCP)