From a58e2cf08e70f6a228df0bc51b09309968cb7c4e Mon Sep 17 00:00:00 2001 From: Thomas Kosmas Date: Tue, 19 Aug 2025 12:31:51 +0300 Subject: [PATCH 01/13] update to v2 --- package-lock.json | 75 ++++--- package.json | 8 +- src/index.test.ts | 388 ---------------------------------- src/index.ts | 515 ++++++++++++++++++++++++++++------------------ 4 files changed, 367 insertions(+), 619 deletions(-) delete mode 100644 src/index.test.ts diff --git a/package-lock.json b/package-lock.json index de77fb4..41b1ef9 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,19 +1,20 @@ { "name": "firecrawl-mcp", - "version": "1.11.0", + "version": "1.12.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "firecrawl-mcp", - "version": "1.11.0", + "version": "1.12.0", "license": "MIT", "dependencies": { - "@mendable/firecrawl-js": "^1.19.0", - "@modelcontextprotocol/sdk": "^1.4.1", + "@mendable/firecrawl-js": "^3.0.3", + "@modelcontextprotocol/sdk": "^1.17.3", "dotenv": "^16.4.7", "express": "^5.1.0", "shx": "^0.3.4", + "typescript": "^5.9.2", "ws": "^8.18.1" }, "bin": { @@ -31,8 +32,7 @@ "jest": "^29.7.0", "jest-mock-extended": "^4.0.0-beta1", "prettier": "^3.1.1", - "ts-jest": "^29.1.1", - "typescript": "^5.3.3" + "ts-jest": "^29.1.1" }, "engines": { "node": ">=18.0.0" @@ -1090,11 +1090,12 @@ } }, "node_modules/@mendable/firecrawl-js": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/@mendable/firecrawl-js/-/firecrawl-js-1.29.0.tgz", - "integrity": "sha512-ZS97rwri5ZZmqDWy7VQJlzCmNFATSvUj+LNBtMj//Rs6fm/uIsyOU5Noq6zWVWKLqFsuQnDM5wnMz8q0JFRi/w==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@mendable/firecrawl-js/-/firecrawl-js-3.0.3.tgz", + "integrity": "sha512-DJKilb346tmJ+gqmXiOOpv5c34axHSoZIb4X/DoB0EV8bo6h3JloRvHnW5i8/VOL5Xb4P3RKz8VhPH7j0D7+Aw==", + "license": "MIT", "dependencies": { - "axios": "^1.6.8", + "axios": "^1.11.0", "typescript-event-target": "^1.1.1", "zod": "^3.23.8", "zod-to-json-schema": "^3.23.0" @@ -1104,15 +1105,17 @@ } }, "node_modules/@modelcontextprotocol/sdk": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.13.2.tgz", - "integrity": "sha512-Vx7qOcmoKkR3qhaQ9qf3GxiVKCEu+zfJddHv6x3dY/9P6+uIwJnmuAur5aB+4FDXf41rRrDnOEGkviX5oYZ67w==", + "version": "1.17.3", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.3.tgz", + "integrity": "sha512-JPwUKWSsbzx+DLFznf/QZ32Qa+ptfbUlHhRLrBQBAFu9iI1iYvizM4p+zhhRDceSsPutXp4z+R/HPVphlIiclg==", + "license": "MIT", "dependencies": { "ajv": "^6.12.6", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", + "eventsource-parser": "^3.0.0", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "pkce-challenge": "^5.0.0", @@ -1711,15 +1714,17 @@ "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" }, "node_modules/axios": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.10.0.tgz", - "integrity": "sha512-/1xYAC4MP/HEG+3duIhFr4ZQXR4sQXOIe+o6sdqzeykGLx6Upp/1p8MHqhINOvGeP7xyNHe7tsiJByc4SSVUxw==", + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.11.0.tgz", + "integrity": 
"sha512-1Lx3WLFQWm3ooKDYZD1eXmoGO9fxYQjrycfHFC8P0sCfQVXyROp0p9PFWBehewBOdCwHc+f/b8I0fMto5eSfwA==", + "license": "MIT", "dependencies": { "follow-redirects": "^1.15.6", - "form-data": "^4.0.0", + "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, @@ -2117,6 +2122,7 @@ "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", "dependencies": { "delayed-stream": "~1.0.0" }, @@ -2265,6 +2271,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", "engines": { "node": ">=0.4.0" } @@ -2435,6 +2442,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", @@ -2951,15 +2959,16 @@ "dev": true }, "node_modules/follow-redirects": { - "version": "1.15.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", - "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", "funding": [ { "type": "individual", "url": "https://github.com/sponsors/RubenVerborgh" } ], + "license": "MIT", "engines": { "node": ">=4.0" }, @@ -2970,9 +2979,10 @@ } }, "node_modules/form-data": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz", - "integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", + "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", @@ -2988,6 +2998,7 @@ "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -2996,6 +3007,7 @@ "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -3254,6 +3266,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", "dependencies": { "has-symbols": "^1.0.3" }, @@ -4888,7 +4901,8 @@ "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": 
"sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" }, "node_modules/punycode": { "version": "2.3.1", @@ -5684,10 +5698,10 @@ } }, "node_modules/typescript": { - "version": "5.8.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", - "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", - "dev": true, + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", + "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -5699,7 +5713,8 @@ "node_modules/typescript-event-target": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/typescript-event-target/-/typescript-event-target-1.1.1.tgz", - "integrity": "sha512-dFSOFBKV6uwaloBCCUhxlD3Pr/P1a/tJdcmPrTXCHlEFD3faj0mztjcGn6VBAhQ0/Bdy8K3VWrrqwbt/ffsYsg==" + "integrity": "sha512-dFSOFBKV6uwaloBCCUhxlD3Pr/P1a/tJdcmPrTXCHlEFD3faj0mztjcGn6VBAhQ0/Bdy8K3VWrrqwbt/ffsYsg==", + "license": "MIT" }, "node_modules/undici-types": { "version": "6.21.0", diff --git a/package.json b/package.json index 3a3af7a..5d10edf 100644 --- a/package.json +++ b/package.json @@ -24,11 +24,12 @@ }, "license": "MIT", "dependencies": { - "@mendable/firecrawl-js": "^1.19.0", - "@modelcontextprotocol/sdk": "^1.4.1", + "@mendable/firecrawl-js": "^3.0.3", + "@modelcontextprotocol/sdk": "^1.17.3", "dotenv": "^16.4.7", "express": "^5.1.0", "shx": "^0.3.4", + "typescript": "^5.9.2", "ws": "^8.18.1" }, "devDependencies": { @@ -43,8 +44,7 @@ "jest": "^29.7.0", "jest-mock-extended": "^4.0.0-beta1", "prettier": "^3.1.1", - "ts-jest": "^29.1.1", - "typescript": "^5.3.3" + "ts-jest": "^29.1.1" }, "engines": { "node": ">=18.0.0" diff --git a/src/index.test.ts b/src/index.test.ts deleted file mode 100644 index 1ffe829..0000000 --- a/src/index.test.ts +++ /dev/null @@ -1,388 +0,0 @@ -import { Server } from '@modelcontextprotocol/sdk/server/index.js'; -import { CallToolRequestSchema } from '@modelcontextprotocol/sdk/types.js'; -import FirecrawlApp from '@mendable/firecrawl-js'; -import type { - SearchResponse, - BatchScrapeResponse, - BatchScrapeStatusResponse, - CrawlResponse, - CrawlStatusResponse, - ScrapeResponse, - FirecrawlDocument, - SearchParams, -} from '@mendable/firecrawl-js'; -import { - describe, - expect, - jest, - test, - beforeEach, - afterEach, -} from '@jest/globals'; -import { mock, MockProxy } from 'jest-mock-extended'; - -// Mock FirecrawlApp -jest.mock('@mendable/firecrawl-js'); - -// Test interfaces -interface RequestParams { - method: string; - params: { - name: string; - arguments?: Record; - }; -} - -interface BatchScrapeArgs { - urls: string[]; - options?: { - formats?: string[]; - [key: string]: any; - }; -} - -interface StatusCheckArgs { - id: string; -} - -interface SearchArgs { - query: string; - scrapeOptions?: { - formats?: string[]; - onlyMainContent?: boolean; - }; -} - -interface ScrapeArgs { - url: string; - formats?: string[]; - onlyMainContent?: boolean; -} - -interface CrawlArgs { - url: string; - maxDepth?: number; - limit?: number; -} - -// Mock client interface -interface MockFirecrawlClient { - scrapeUrl(url: string, options?: any): Promise; - search(query: string, params?: SearchParams): Promise; - 
asyncBatchScrapeUrls( - urls: string[], - options?: any - ): Promise; - checkBatchScrapeStatus(id: string): Promise; - asyncCrawlUrl(url: string, options?: any): Promise; - checkCrawlStatus(id: string): Promise; - mapUrl(url: string, options?: any): Promise<{ links: string[] }>; -} - -describe('Firecrawl Tool Tests', () => { - let mockClient: MockProxy; - let requestHandler: (request: RequestParams) => Promise; - - beforeEach(() => { - jest.clearAllMocks(); - mockClient = mock(); - - // Set up mock implementations - const mockInstance = new FirecrawlApp({ apiKey: 'test' }); - Object.assign(mockInstance, mockClient); - - // Create request handler - requestHandler = async (request: RequestParams) => { - const { name, arguments: args } = request.params; - if (!args) { - throw new Error('No arguments provided'); - } - return handleRequest(name, args, mockClient); - }; - }); - - afterEach(() => { - jest.clearAllMocks(); - }); - - // Test scrape functionality - test('should handle scrape request', async () => { - const url = 'https://example.com'; - const options = { formats: ['markdown'] }; - - const mockResponse: ScrapeResponse = { - success: true, - markdown: '# Test Content', - html: undefined, - rawHtml: undefined, - url: 'https://example.com', - actions: undefined as never, - }; - - mockClient.scrapeUrl.mockResolvedValueOnce(mockResponse); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_scrape', - arguments: { url, ...options }, - }, - }); - - expect(response).toEqual({ - content: [{ type: 'text', text: '# Test Content' }], - isError: false, - }); - expect(mockClient.scrapeUrl).toHaveBeenCalledWith(url, { - formats: ['markdown'], - url, - }); - }); - - // Test scrape with maxAge parameter - test('should handle scrape request with maxAge parameter', async () => { - const url = 'https://example.com'; - const options = { formats: ['markdown'], maxAge: 3600000 }; - - const mockResponse: ScrapeResponse = { - success: true, - markdown: '# Test Content', - html: undefined, - rawHtml: undefined, - url: 'https://example.com', - actions: undefined as never, - }; - - mockClient.scrapeUrl.mockResolvedValueOnce(mockResponse); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_scrape', - arguments: { url, ...options }, - }, - }); - - expect(response).toEqual({ - content: [{ type: 'text', text: '# Test Content' }], - isError: false, - }); - expect(mockClient.scrapeUrl).toHaveBeenCalledWith(url, { - formats: ['markdown'], - maxAge: 3600000, - url, - }); - }); - - // Test batch scrape functionality - test('should handle batch scrape request', async () => { - const urls = ['https://example.com']; - const options = { formats: ['markdown'] }; - - mockClient.asyncBatchScrapeUrls.mockResolvedValueOnce({ - success: true, - id: 'test-batch-id', - }); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_batch_scrape', - arguments: { urls, options }, - }, - }); - - expect(response.content[0].text).toContain( - 'Batch operation queued with ID: batch_' - ); - expect(mockClient.asyncBatchScrapeUrls).toHaveBeenCalledWith(urls, options); - }); - - // Test search functionality - test('should handle search request', async () => { - const query = 'test query'; - const scrapeOptions = { formats: ['markdown'] }; - - const mockSearchResponse: SearchResponse = { - success: true, - data: [ - { - url: 'https://example.com', - title: 'Test Page', - description: 'Test Description', - 
markdown: '# Test Content', - actions: undefined as never, - }, - ], - }; - - mockClient.search.mockResolvedValueOnce(mockSearchResponse); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_search', - arguments: { query, scrapeOptions }, - }, - }); - - expect(response.isError).toBe(false); - expect(response.content[0].text).toContain('Test Page'); - expect(mockClient.search).toHaveBeenCalledWith(query, scrapeOptions); - }); - - // Test crawl functionality - test('should handle crawl request', async () => { - const url = 'https://example.com'; - const options = { maxDepth: 2 }; - - mockClient.asyncCrawlUrl.mockResolvedValueOnce({ - success: true, - id: 'test-crawl-id', - }); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_crawl', - arguments: { url, ...options }, - }, - }); - - expect(response.isError).toBe(false); - expect(response.content[0].text).toContain('test-crawl-id'); - expect(mockClient.asyncCrawlUrl).toHaveBeenCalledWith(url, { - maxDepth: 2, - url, - }); - }); - - // Test error handling - test('should handle API errors', async () => { - const url = 'https://example.com'; - - mockClient.scrapeUrl.mockRejectedValueOnce(new Error('API Error')); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_scrape', - arguments: { url }, - }, - }); - - expect(response.isError).toBe(true); - expect(response.content[0].text).toContain('API Error'); - }); - - // Test rate limiting - test('should handle rate limits', async () => { - const url = 'https://example.com'; - - // Mock rate limit error - mockClient.scrapeUrl.mockRejectedValueOnce( - new Error('rate limit exceeded') - ); - - const response = await requestHandler({ - method: 'call_tool', - params: { - name: 'firecrawl_scrape', - arguments: { url }, - }, - }); - - expect(response.isError).toBe(true); - expect(response.content[0].text).toContain('rate limit exceeded'); - }); -}); - -// Helper function to simulate request handling -async function handleRequest( - name: string, - args: any, - client: MockFirecrawlClient -) { - try { - switch (name) { - case 'firecrawl_scrape': { - const response = await client.scrapeUrl(args.url, args); - if (!response.success) { - throw new Error(response.error || 'Scraping failed'); - } - return { - content: [ - { type: 'text', text: response.markdown || 'No content available' }, - ], - isError: false, - }; - } - - case 'firecrawl_batch_scrape': { - const response = await client.asyncBatchScrapeUrls( - args.urls, - args.options - ); - return { - content: [ - { - type: 'text', - text: `Batch operation queued with ID: batch_1. Use firecrawl_check_batch_status to check progress.`, - }, - ], - isError: false, - }; - } - - case 'firecrawl_search': { - const response = await client.search(args.query, args.scrapeOptions); - if (!response.success) { - throw new Error(response.error || 'Search failed'); - } - const results = response.data - .map( - (result) => - `URL: ${result.url}\nTitle: ${ - result.title || 'No title' - }\nDescription: ${result.description || 'No description'}\n${ - result.markdown ? 
`\nContent:\n${result.markdown}` : '' - }` - ) - .join('\n\n'); - return { - content: [{ type: 'text', text: results }], - isError: false, - }; - } - - case 'firecrawl_crawl': { - const response = await client.asyncCrawlUrl(args.url, args); - if (!response.success) { - throw new Error(response.error); - } - return { - content: [ - { - type: 'text', - text: `Started crawl for ${args.url} with job ID: ${response.id}`, - }, - ], - isError: false, - }; - } - - default: - throw new Error(`Unknown tool: ${name}`); - } - } catch (error) { - return { - content: [ - { - type: 'text', - text: error instanceof Error ? error.message : String(error), - }, - ], - isError: true, - }; - } -} diff --git a/src/index.ts b/src/index.ts index ff2550d..676a3e4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -9,14 +9,16 @@ import { ListToolsRequestSchema, } from '@modelcontextprotocol/sdk/types.js'; import FirecrawlApp, { - type ScrapeParams, - type MapParams, - type CrawlParams, - type FirecrawlDocument, + type ScrapeOptions, + type MapOptions, + type Document, } from '@mendable/firecrawl-js'; +import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js'; + import express, { Request, Response } from 'express'; import dotenv from 'dotenv'; +import { randomUUID } from 'node:crypto'; dotenv.config(); @@ -64,6 +66,7 @@ This is the most powerful, fastest and most reliable scraper tool, if available 'links', 'screenshot@fullPage', 'extract', + 'summary', ], }, default: ['markdown'], @@ -71,6 +74,7 @@ This is the most powerful, fastest and most reliable scraper tool, if available }, onlyMainContent: { type: 'boolean', + default: true, description: 'Extract only the main content, filtering out navigation, footers, etc.', }, @@ -88,11 +92,6 @@ This is the most powerful, fastest and most reliable scraper tool, if available type: 'number', description: 'Time in milliseconds to wait for dynamic content to load', }, - timeout: { - type: 'number', - description: - 'Maximum time in milliseconds to wait for the page to load', - }, actions: { type: 'array', items: { @@ -191,9 +190,17 @@ This is the most powerful, fastest and most reliable scraper tool, if available }, description: 'Location settings for scraping', }, + storeInCache: { + type: 'boolean', + default: true, + description: + 'If true, the page will be stored in the Firecrawl index and cache. Setting this to false is useful if your scraping activity may have data protection concerns.', + }, maxAge: { type: 'number', - description: 'Maximum age in milliseconds for cached content. Use cached data if available and younger than maxAge, otherwise scrape fresh. Enables 500% faster scrapes for recently cached pages. Default: 0 (always scrape fresh)', + default: 172800000, + description: + 'Maximum age in milliseconds for cached content. Use cached data if available and younger than maxAge, otherwise scrape fresh. Enables 500% faster scrapes for recently cached pages. Default: 172800000', }, }, required: ['url'], @@ -231,22 +238,27 @@ Map a website to discover all indexed URLs on the site. type: 'string', description: 'Optional search term to filter URLs', }, - ignoreSitemap: { - type: 'boolean', - description: 'Skip sitemap.xml discovery and only use HTML links', - }, - sitemapOnly: { - type: 'boolean', - description: 'Only use sitemap.xml for discovery, ignore HTML links', + sitemap: { + type: 'string', + enum: ['skip', 'include', 'only'], + default: 'include', + description: + 'Sitemap mode when mapping. 
If set to skip, the sitemap will not be used. If set to only, only URLs that are in the sitemap will be returned. By default (include), the sitemap and other methods will be used together to find URLs.', }, includeSubdomains: { type: 'boolean', description: 'Include URLs from subdomains in results', }, + limit: { type: 'number', description: 'Maximum number of URLs to return', }, + ignoreQueryParameters: { + type: 'boolean', + default: true, + description: 'Do not return URLs with query parameters', + }, }, required: ['url'], }, @@ -255,28 +267,29 @@ Map a website to discover all indexed URLs on the site. const CRAWL_TOOL: Tool = { name: 'firecrawl_crawl', description: ` -Starts an asynchronous crawl job on a website and extracts content from all pages. - -**Best for:** Extracting content from multiple related pages, when you need comprehensive coverage. -**Not recommended for:** Extracting content from a single page (use scrape); when token limits are a concern (use map + batch_scrape); when you need fast results (crawling can be slow). -**Warning:** Crawl responses can be very large and may exceed token limits. Limit the crawl depth and number of pages, or use map + batch_scrape for better control. -**Common mistakes:** Setting limit or maxDepth too high (causes token overflow); using crawl for a single page (use scrape instead). -**Prompt Example:** "Get all blog posts from the first two levels of example.com/blog." -**Usage Example:** -\`\`\`json -{ - "name": "firecrawl_crawl", - "arguments": { - "url": "https://example.com/blog/*", - "maxDepth": 2, - "limit": 100, - "allowExternalLinks": false, - "deduplicateSimilarURLs": true - } -} -\`\`\` -**Returns:** Operation ID for status checking; use firecrawl_check_crawl_status to check progress. -`, + Starts an asynchronous crawl job on a website and extracts content from all pages. + + **Best for:** Extracting content from multiple related pages, when you need comprehensive coverage. + **Not recommended for:** Extracting content from a single page (use scrape); when token limits are a concern (use map + batch_scrape); when you need fast results (crawling can be slow). + **Warning:** Crawl responses can be very large and may exceed token limits. Limit the crawl depth and number of pages, or use map + batch_scrape for better control. + **Common mistakes:** Setting limit or maxDiscoveryDepth too high (causes token overflow); using crawl for a single page (use scrape instead). + **Prompt Example:** "Get all blog posts from the first two levels of example.com/blog." + **Usage Example:** + \`\`\`json + { + "name": "firecrawl_crawl", + "arguments": { + "url": "https://example.com/blog/*", + "maxDiscoveryDepth": 2, + "limit": 100, + "allowExternalLinks": false, + "deduplicateSimilarURLs": true, + "sitemap": "include" + } + } + \`\`\` + **Returns:** Operation ID for status checking; use firecrawl_check_crawl_status to check progress. + `, inputSchema: { type: 'object', properties: { @@ -284,6 +297,11 @@ Starts an asynchronous crawl job on a website and extracts content from all page type: 'string', description: 'Starting URL for the crawl', }, + prompt: { + type: 'string', + description: + 'Natural language prompt to generate crawler options. 
Explicitly set parameters will override generated ones.', + }, excludePaths: { type: 'array', items: { type: 'string' }, @@ -294,26 +312,48 @@ Starts an asynchronous crawl job on a website and extracts content from all page items: { type: 'string' }, description: 'Only crawl these URL paths', }, - maxDepth: { + maxDiscoveryDepth: { type: 'number', - description: 'Maximum link depth to crawl', + description: + 'Maximum discovery depth to crawl. The root site and sitemapped pages have depth 0.', }, - ignoreSitemap: { - type: 'boolean', - description: 'Skip sitemap.xml discovery', + sitemap: { + type: 'string', + enum: ['skip', 'include', 'only'], + default: 'include', + description: + "Sitemap mode when crawling. 'skip' ignores the sitemap entirely, 'include' uses sitemap plus other discovery methods (default), 'only' restricts crawling to sitemap URLs.", }, limit: { type: 'number', - description: 'Maximum number of pages to crawl', - }, - allowBackwardLinks: { - type: 'boolean', - description: 'Allow crawling links that point to parent directories', + default: 10000, + description: 'Maximum number of pages to crawl (default: 10000)', }, allowExternalLinks: { type: 'boolean', description: 'Allow crawling links to external domains', }, + allowSubdomains: { + type: 'boolean', + default: false, + description: 'Allow crawling links to subdomains of the main domain', + }, + crawlEntireDomain: { + type: 'boolean', + default: false, + description: + 'When true, follow internal links to sibling or parent URLs, not just child paths', + }, + delay: { + type: 'number', + description: + 'Delay in seconds between scrapes to respect site rate limits', + }, + maxConcurrency: { + type: 'number', + description: + 'Maximum number of concurrent scrapes; if unset, team limit is used', + }, webhook: { oneOf: [ { @@ -342,7 +382,9 @@ Starts an asynchronous crawl job on a website and extracts content from all page }, ignoreQueryParameters: { type: 'boolean', - description: 'Ignore query parameters when comparing URLs', + default: false, + description: + 'Do not re-scrape the same path with different (or none) query parameters', }, scrapeOptions: { type: 'object', @@ -450,14 +492,6 @@ Search the web and optionally extract content from search results. This is the m type: 'number', description: 'Maximum number of results to return (default: 5)', }, - lang: { - type: 'string', - description: 'Language code for search results (default: en)', - }, - country: { - type: 'string', - description: 'Country code for search results (default: us)', - }, tbs: { type: 'string', description: 'Time-based search filter', @@ -467,19 +501,50 @@ Search the web and optionally extract content from search results. This is the m description: 'Search filter', }, location: { - type: 'object', - properties: { - country: { - type: 'string', - description: 'Country code for geolocation', - }, - languages: { - type: 'array', - items: { type: 'string' }, - description: 'Language codes for content', - }, + type: 'string', + description: 'Location parameter for search results', + }, + sources: { + type: 'array', + description: + 'Sources to search. 
Determines which result arrays are included in the response.', + items: { + oneOf: [ + { + type: 'object', + properties: { + type: { type: 'string', enum: ['web'] }, + tbs: { + type: 'string', + description: + 'Time-based search parameter (e.g., qdr:h, qdr:d, qdr:w, qdr:m, qdr:y or custom cdr with cd_min/cd_max)', + }, + location: { + type: 'string', + description: 'Location parameter for search results', + }, + }, + required: ['type'], + additionalProperties: false, + }, + { + type: 'object', + properties: { + type: { type: 'string', enum: ['images'] }, + }, + required: ['type'], + additionalProperties: false, + }, + { + type: 'object', + properties: { + type: { type: 'string', enum: ['news'] }, + }, + required: ['type'], + additionalProperties: false, + }, + ], }, - description: 'Location settings for search', }, scrapeOptions: { type: 'object', @@ -747,6 +812,19 @@ interface SearchOptions { excludeTags?: string[]; timeout?: number; }; + sources?: Array< + | { + type: 'web'; + tbs?: string; + location?: string; + } + | { + type: 'images'; + } + | { + type: 'news'; + } + >; } // Add after other interfaces @@ -782,7 +860,7 @@ interface ExtractResponse { // Type guards function isScrapeOptions( args: unknown -): args is ScrapeParams & { url: string } { +): args is ScrapeOptions & { url: string } { return ( typeof args === 'object' && args !== null && @@ -791,7 +869,7 @@ function isScrapeOptions( ); } -function isMapOptions(args: unknown): args is MapParams & { url: string } { +function isMapOptions(args: unknown): args is MapOptions & { url: string } { return ( typeof args === 'object' && args !== null && @@ -800,7 +878,8 @@ function isMapOptions(args: unknown): args is MapParams & { url: string } { ); } -function isCrawlOptions(args: unknown): args is CrawlParams & { url: string } { +//@ts-expect-error todo: fix +function isCrawlOptions(args: unknown): args is CrawlOptions & { url: string } { return ( typeof args === 'object' && args !== null && @@ -847,6 +926,26 @@ function isGenerateLLMsTextOptions( ); } +function removeEmptyTopLevel>( + obj: T +): Partial { + const out: Partial = {}; + for (const [k, v] of Object.entries(obj)) { + if (v == null) continue; + if (typeof v === 'string' && v.trim() === '') continue; + if (Array.isArray(v) && v.length === 0) continue; + if ( + typeof v === 'object' && + !Array.isArray(v) && + Object.keys(v).length === 0 + ) + continue; + // @ts-expect-error dynamic assignment + out[k] = v; + } + return out; +} + // Server implementation const server = new Server( { @@ -856,7 +955,6 @@ const server = new Server( { capabilities: { tools: {}, - logging: {}, }, } ); @@ -914,15 +1012,11 @@ function safeLog( | 'emergency', data: any ): void { - if (isStdioTransport) { - // For stdio transport, log to stderr to avoid protocol interference - console.error( - `[${level}] ${typeof data === 'object' ? JSON.stringify(data) : data}` - ); - } else { - // For other transport types, use the normal logging mechanism - server.sendLoggingMessage({ level, data }); - } + // Always log to stderr to avoid relying on MCP logging capability + const message = `[${level}] ${ + typeof data === 'object' ? JSON.stringify(data) : String(data) + }`; + console.error(message); } // Add retry logic with exponential backoff @@ -976,11 +1070,11 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { const startTime = Date.now(); try { const { name, arguments: args } = request.params; - - const apiKey = process.env.CLOUD_SERVICE - ? 
(request.params._meta?.apiKey as string) - : FIRECRAWL_API_KEY; - if (process.env.CLOUD_SERVICE && !apiKey) { + const apiKey = + process.env.CLOUD_SERVICE === 'true' + ? (request.params._meta?.apiKey as string) + : FIRECRAWL_API_KEY; + if (process.env.CLOUD_SERVICE === 'true' && !apiKey) { throw new Error('No API key provided'); } @@ -1004,28 +1098,26 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { throw new Error('Invalid arguments for firecrawl_scrape'); } const { url, ...options } = args; + const cleaned = removeEmptyTopLevel(options); try { const scrapeStartTime = Date.now(); safeLog( 'info', `Starting scrape for URL: ${url} with options: ${JSON.stringify(options)}` ); - - const response = await client.scrapeUrl(url, { - ...options, - // @ts-expect-error Extended API options including origin + const response = await client.scrape(url, { + ...cleaned, origin: 'mcp-server', }); - // Log performance metrics safeLog( 'info', `Scrape completed in ${Date.now() - scrapeStartTime}ms` ); - if ('success' in response && !response.success) { - throw new Error(response.error || 'Scraping failed'); - } + // if ('success' in response && !response.success) { + // throw new Error(response.error || 'Scraping failed'); + // } // Format content based on requested formats const contentParts = []; @@ -1045,8 +1137,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { if (options.formats?.includes('screenshot') && response.screenshot) { contentParts.push(response.screenshot); } - if (options.formats?.includes('extract') && response.extract) { - contentParts.push(JSON.stringify(response.extract, null, 2)); + if (options.formats?.includes('json') && response.json) { + contentParts.push(JSON.stringify(response.json, null, 2)); } // If options.formats is empty, default to markdown @@ -1085,14 +1177,12 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { throw new Error('Invalid arguments for firecrawl_map'); } const { url, ...options } = args; - const response = await client.mapUrl(url, { + const response = await client.map(url, { ...options, // @ts-expect-error Extended API options including origin origin: 'mcp-server', }); - if ('error' in response) { - throw new Error(response.error); - } + if (!response.links) { throw new Error('No links received from Firecrawl API'); } @@ -1137,10 +1227,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { if (!isStatusCheckOptions(args)) { throw new Error('Invalid arguments for firecrawl_check_crawl_status'); } - const response = await client.checkCrawlStatus(args.id); - if (!response.success) { - throw new Error(response.error); - } + const response = await client.getCrawlStatus(args.id); + const status = `Crawl Status: Status: ${response.status} Progress: ${response.completed}/${response.total} @@ -1162,29 +1250,32 @@ ${ try { const response = await withRetry( async () => - client.search(args.query, { ...args, origin: 'mcp-server' }), + client.search(args.query, { + ...args, + // @ts-expect-error Extended API options including origin + origin: 'mcp-server', + }), 'search operation' ); - if (!response.success) { - throw new Error( - `Search failed: ${response.error || 'Unknown error'}` - ); - } - // Format the results - const results = response.data - .map( - (result) => - `URL: ${result.url} -Title: ${result.title || 'No title'} -Description: ${result.description || 'No description'} -${result.markdown ? 
`\nContent:\n${result.markdown}` : ''}` - ) - .join('\n\n'); + // const results = response.data + // .map( + // (result) => + // `URL: ${result.url} + // Title: ${result.title || 'No title'} + // Description: ${result.description || 'No description'} + // ${result.markdown ? `\nContent:\n${result.markdown}` : ''}` + // ) + // .join('\n\n'); return { - content: [{ type: 'text', text: trimResponseText(results) }], + content: [ + { + type: 'text', + text: trimResponseText(JSON.stringify(response, null, 2)), + }, + ], isError: false, }; } catch (error) { @@ -1219,7 +1310,8 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}` const extractResponse = await withRetry( async () => - client.extract(args.urls, { + client.extract({ + urls: args.urls, prompt: args.prompt, systemPrompt: args.systemPrompt, schema: args.schema, @@ -1293,76 +1385,6 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}` } } - case 'firecrawl_deep_research': { - if (!args || typeof args !== 'object' || !('query' in args)) { - throw new Error('Invalid arguments for firecrawl_deep_research'); - } - - try { - const researchStartTime = Date.now(); - safeLog('info', `Starting deep research for query: ${args.query}`); - - const response = await client.deepResearch( - args.query as string, - { - maxDepth: args.maxDepth as number, - timeLimit: args.timeLimit as number, - maxUrls: args.maxUrls as number, - // @ts-expect-error Extended API options including origin - origin: 'mcp-server', - }, - // Activity callback - (activity) => { - safeLog( - 'info', - `Research activity: ${activity.message} (Depth: ${activity.depth})` - ); - }, - // Source callback - (source) => { - safeLog( - 'info', - `Research source found: ${source.url}${source.title ? ` - ${source.title}` : ''}` - ); - } - ); - - // Log performance metrics - safeLog( - 'info', - `Deep research completed in ${Date.now() - researchStartTime}ms` - ); - - if (!response.success) { - throw new Error(response.error || 'Deep research failed'); - } - - // Format the results - const formattedResponse = { - finalAnalysis: response.data.finalAnalysis, - activities: response.data.activities, - sources: response.data.sources, - }; - - return { - content: [ - { - type: 'text', - text: trimResponseText(formattedResponse.finalAnalysis), - }, - ], - isError: false, - }; - } catch (error) { - const errorMessage = - error instanceof Error ? error.message : String(error); - return { - content: [{ type: 'text', text: trimResponseText(errorMessage) }], - isError: true, - }; - } - } - case 'firecrawl_generate_llmstxt': { if (!isGenerateLLMsTextOptions(args)) { throw new Error('Invalid arguments for firecrawl_generate_llmstxt'); @@ -1454,12 +1476,11 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}` }); // Helper function to format results -function formatResults(data: FirecrawlDocument[]): string { +function formatResults(data: Document[]): string { return data .map((doc) => { const content = doc.markdown || doc.html || doc.rawHtml || 'No content'; - return `URL: ${doc.url || 'Unknown URL'} -Content: ${content.substring(0, 100)}${content.length > 100 ? '...' : ''} + return `Content: ${content.substring(0, 100)}${content.length > 100 ? '...' : ''} ${doc.metadata?.title ? 
`Title: ${doc.metadata.title}` : ''}`; }) .join('\n\n'); @@ -1533,7 +1554,101 @@ async function runSSELocalServer() { console.error('Error starting server:', error); } } +async function runHTTPStreamableServer() { + const app = express(); + app.use(express.json()); + + const transports: { [sessionId: string]: StreamableHTTPServerTransport } = {}; + + // A single endpoint handles all MCP requests. + app.all('/mcp', async (req: Request, res: Response) => { + try { + const sessionId = req.headers['mcp-session-id'] as string | undefined; + let transport: StreamableHTTPServerTransport; + + if (sessionId && transports[sessionId]) { + transport = transports[sessionId]; + } else if ( + !sessionId && + req.method === 'POST' && + req.body && + typeof req.body === 'object' && + (req.body as any).method === 'initialize' + ) { + transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: () => { + const id = randomUUID(); + return id; + }, + onsessioninitialized: (sid: string) => { + transports[sid] = transport; + }, + }); + + transport.onclose = () => { + const sid = transport.sessionId; + if (sid && transports[sid]) { + delete transports[sid]; + } + }; + console.log('Creating server instance'); + console.log('Connecting transport to server'); + await server.connect(transport); + await transport.handleRequest(req, res, req.body); + return; + } else { + res.status(400).json({ + jsonrpc: '2.0', + error: { + code: -32000, + message: 'Invalid or missing session ID', + }, + id: null, + }); + return; + } + + await transport.handleRequest(req, res, req.body); + } catch (error) { + if (!res.headersSent) { + res.status(500).json({ + jsonrpc: '2.0', + error: { + code: -32603, + message: 'Internal server error', + }, + id: null, + }); + } + } + }); + + const PORT = 3000; + const appServer = app.listen(PORT, () => { + console.log(`MCP Streamable HTTP Server listening on port ${PORT}`); + }); + + process.on('SIGINT', async () => { + console.log('Shutting down server...'); + for (const sessionId in transports) { + try { + console.log(`Closing transport for session ${sessionId}`); + await transports[sessionId].close(); + delete transports[sessionId]; + } catch (error) { + console.error( + `Error closing transport for session ${sessionId}:`, + error + ); + } + } + appServer.close(() => { + console.log('Server shutdown complete'); + process.exit(0); + }); + }); +} async function runSSECloudServer() { const transports: { [sessionId: string]: SSEServerTransport } = {}; const app = express(); @@ -1608,6 +1723,12 @@ if (process.env.CLOUD_SERVICE === 'true') { console.error('Fatal error running server:', error); process.exit(1); }); +} else if (process.env.HTTP_STREAMABLE_SERVER === 'true') { + console.log('Running HTTP Streamable Server'); + runHTTPStreamableServer().catch((error: any) => { + console.error('Fatal error running server:', error); + process.exit(1); + }); } else { runLocalServer().catch((error: any) => { console.error('Fatal error running server:', error); From 363af2ff74a4f957d8ac9d52a6235228f26a66f4 Mon Sep 17 00:00:00 2001 From: Thomas Kosmas Date: Tue, 19 Aug 2025 16:57:09 +0300 Subject: [PATCH 02/13] fixes --- src/index.ts | 202 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 120 insertions(+), 82 deletions(-) diff --git a/src/index.ts b/src/index.ts index 676a3e4..402ad41 100644 --- a/src/index.ts +++ b/src/index.ts @@ -57,16 +57,40 @@ This is the most powerful, fastest and most reliable scraper tool, if available formats: { type: 'array', items: { - type: 'string', - 
enum: [ - 'markdown', - 'html', - 'rawHtml', - 'screenshot', - 'links', - 'screenshot@fullPage', - 'extract', - 'summary', + oneOf: [ + { + type: 'string', + enum: [ + 'markdown', + 'html', + 'rawHtml', + 'screenshot', + 'links', + 'extract', + 'summary', + ], + }, + { + type: 'object', + properties: { + type: { + type: 'string', + enum: ['json'], + }, + prompt: { + type: 'string', + description: 'Prompt to guide JSON extraction', + }, + schema: { + type: 'object', + description: 'JSON schema for structured extraction', + }, + }, + required: ['type'], + additionalProperties: true, + description: + 'Advanced format option. Use { type: "json", prompt, schema } to request structured JSON extraction.', + }, ], }, default: ['markdown'], @@ -145,24 +169,6 @@ This is the most powerful, fastest and most reliable scraper tool, if available }, description: 'List of actions to perform before scraping', }, - extract: { - type: 'object', - properties: { - schema: { - type: 'object', - description: 'Schema for structured data extraction', - }, - systemPrompt: { - type: 'string', - description: 'System prompt for LLM extraction', - }, - prompt: { - type: 'string', - description: 'User prompt for LLM extraction', - }, - }, - description: 'Configuration for structured data extraction', - }, mobile: { type: 'boolean', description: 'Use mobile viewport', @@ -267,7 +273,7 @@ Map a website to discover all indexed URLs on the site. const CRAWL_TOOL: Tool = { name: 'firecrawl_crawl', description: ` - Starts an asynchronous crawl job on a website and extracts content from all pages. + Starts a crawl job on a website and extracts content from all pages. **Best for:** Extracting content from multiple related pages, when you need comprehensive coverage. **Not recommended for:** Extracting content from a single page (use scrape); when token limits are a concern (use map + batch_scrape); when you need fast results (crawling can be slow). @@ -392,17 +398,44 @@ const CRAWL_TOOL: Tool = { formats: { type: 'array', items: { - type: 'string', - enum: [ - 'markdown', - 'html', - 'rawHtml', - 'screenshot', - 'links', - 'screenshot@fullPage', - 'extract', + oneOf: [ + { + type: 'string', + enum: [ + 'markdown', + 'html', + 'rawHtml', + 'screenshot', + 'links', + 'extract', + 'summary', + ], + }, + { + type: 'object', + properties: { + type: { + type: 'string', + enum: ['json'], + }, + prompt: { + type: 'string', + description: 'Prompt to guide JSON extraction', + }, + schema: { + type: 'object', + description: 'JSON schema for structured extraction', + }, + }, + required: ['type'], + additionalProperties: true, + description: + 'Advanced format option. Use { type: "json", prompt, schema } to request structured JSON extraction.', + }, ], }, + default: ['markdown'], + description: "Content formats to extract (default: ['markdown'])", }, onlyMainContent: { type: 'boolean', @@ -552,8 +585,22 @@ Search the web and optionally extract content from search results. 
This is the m formats: { type: 'array', items: { - type: 'string', - enum: ['markdown', 'html', 'rawHtml'], + oneOf: [ + { + type: 'string', + enum: ['markdown', 'html', 'rawHtml'], + }, + { + type: 'object', + properties: { + type: { type: 'string', enum: ['json'] }, + prompt: { type: 'string' }, + schema: { type: 'object' }, + }, + required: ['type'], + additionalProperties: true, + }, + ], }, description: 'Content formats to extract from search results', }, @@ -805,7 +852,7 @@ interface SearchOptions { languages?: string[]; }; scrapeOptions?: { - formats?: string[]; + formats?: any[]; onlyMainContent?: boolean; waitFor?: number; includeTags?: string[]; @@ -1097,7 +1144,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { if (!isScrapeOptions(args)) { throw new Error('Invalid arguments for firecrawl_scrape'); } - const { url, ...options } = args; + const { url, ...options } = args as any; const cleaned = removeEmptyTopLevel(options); try { const scrapeStartTime = Date.now(); @@ -1108,37 +1155,42 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { const response = await client.scrape(url, { ...cleaned, origin: 'mcp-server', - }); + } as any); // Log performance metrics safeLog( 'info', `Scrape completed in ${Date.now() - scrapeStartTime}ms` ); - // if ('success' in response && !response.success) { - // throw new Error(response.error || 'Scraping failed'); - // } - // Format content based on requested formats - const contentParts = []; + const contentParts: string[] = []; + + const formats = (options?.formats ?? []) as any[]; + const hasFormat = (name: string) => + Array.isArray(formats) && + formats.some((f) => + typeof f === 'string' + ? f === name + : f && typeof f === 'object' && (f as any).type === name + ); - if (options.formats?.includes('markdown') && response.markdown) { - contentParts.push(response.markdown); + if (hasFormat('markdown') && (response as any).markdown) { + contentParts.push((response as any).markdown); } - if (options.formats?.includes('html') && response.html) { - contentParts.push(response.html); + if (hasFormat('html') && (response as any).html) { + contentParts.push((response as any).html); } - if (options.formats?.includes('rawHtml') && response.rawHtml) { - contentParts.push(response.rawHtml); + if (hasFormat('rawHtml') && (response as any).rawHtml) { + contentParts.push((response as any).rawHtml); } - if (options.formats?.includes('links') && response.links) { - contentParts.push(response.links.join('\n')); + if (hasFormat('links') && (response as any).links) { + contentParts.push((response as any).links.join('\n')); } - if (options.formats?.includes('screenshot') && response.screenshot) { - contentParts.push(response.screenshot); + if (hasFormat('screenshot') && (response as any).screenshot) { + contentParts.push((response as any).screenshot); } - if (options.formats?.includes('json') && response.json) { - contentParts.push(JSON.stringify(response.json, null, 2)); + if (hasFormat('json') && (response as any).json) { + contentParts.push(JSON.stringify((response as any).json, null, 2)); } // If options.formats is empty, default to markdown @@ -1147,8 +1199,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { } // Add warning to response if present - if (response.warning) { - safeLog('warning', response.warning); + if ((response as any).warning) { + safeLog('warning', (response as any).warning); } return { @@ -1201,22 +1253,19 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { 
const { url, ...options } = args; const response = await withRetry( async () => - // @ts-expect-error Extended API options including origin - client.asyncCrawlUrl(url, { ...options, origin: 'mcp-server' }), + client.crawl(url as string, { + ...options, + // @ts-expect-error Extended API options including origin + origin: 'mcp-server', + }), 'crawl operation' ); - if (!response.success) { - throw new Error(response.error); - } - return { content: [ { type: 'text', - text: trimResponseText( - `Started crawl for ${url} with job ID: ${response.id}. Use firecrawl_check_crawl_status to check progress.` - ), + text: trimResponseText(JSON.stringify(response)), }, ], isError: false, @@ -1258,17 +1307,6 @@ ${ 'search operation' ); - // Format the results - // const results = response.data - // .map( - // (result) => - // `URL: ${result.url} - // Title: ${result.title || 'No title'} - // Description: ${result.description || 'No description'} - // ${result.markdown ? `\nContent:\n${result.markdown}` : ''}` - // ) - // .join('\n\n'); - return { content: [ { From e3e8d0034009aefd9f0c5f929c035877ab78b4e4 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Fri, 22 Aug 2025 21:07:41 -0700 Subject: [PATCH 03/13] Nick: fixed a few issues --- package.json | 2 +- src/index.ts | 119 +++++++-------------------------------------------- 2 files changed, 17 insertions(+), 104 deletions(-) diff --git a/package.json b/package.json index 5d10edf..430881c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "firecrawl-mcp", - "version": "1.12.0", + "version": "2.0.0", "description": "MCP server for Firecrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, batch processing, structured data extraction, and LLM-powered content analysis.", "type": "module", "bin": { diff --git a/src/index.ts b/src/index.ts index 402ad41..7f2fd4e 100644 --- a/src/index.ts +++ b/src/index.ts @@ -246,11 +246,10 @@ Map a website to discover all indexed URLs on the site. }, sitemap: { type: 'string', - enum: ['skip', 'include', 'only'], - default: 'include', - description: - 'Sitemap mode when mapping. If set to skip, the sitemap will not be used. If set to only, only URLs that are in the sitemap will be returned. By default (include), the sitemap and other methods will be used together to find URLs.', + enum: ['include', 'skip', 'only'], + description: 'Sitemap handling: "include" - use sitemap + find other pages (default), "skip" - ignore sitemap completely, "only" - only return sitemap URLs', }, + includeSubdomains: { type: 'boolean', description: 'Include URLs from subdomains in results', @@ -496,6 +495,7 @@ Search the web and optionally extract content from search results. This is the m **Not recommended for:** When you already know which website to scrape (use scrape); when you need comprehensive coverage of a single website (use map or crawl). **Common mistakes:** Using crawl or map for open-ended questions (use search instead). **Prompt Example:** "Find the latest research papers on AI published in 2023." +**Sources:** web, images, news, default to web unless needed images or news. **Usage Example:** \`\`\`json { @@ -505,6 +505,11 @@ Search the web and optionally extract content from search results. This is the m "limit": 5, "lang": "en", "country": "us", + "sources": [ + "web", + "images", + "news" + ], "scrapeOptions": { "formats": ["markdown"], "onlyMainContent": true @@ -698,102 +703,6 @@ Extract structured information from web pages using LLM capabilities. 
Supports b }, }; -const DEEP_RESEARCH_TOOL: Tool = { - name: 'firecrawl_deep_research', - description: ` -Conduct deep web research on a query using intelligent crawling, search, and LLM analysis. - -**Best for:** Complex research questions requiring multiple sources, in-depth analysis. -**Not recommended for:** Simple questions that can be answered with a single search; when you need very specific information from a known page (use scrape); when you need results quickly (deep research can take time). -**Arguments:** -- query (string, required): The research question or topic to explore. -- maxDepth (number, optional): Maximum recursive depth for crawling/search (default: 3). -- timeLimit (number, optional): Time limit in seconds for the research session (default: 120). -- maxUrls (number, optional): Maximum number of URLs to analyze (default: 50). -**Prompt Example:** "Research the environmental impact of electric vehicles versus gasoline vehicles." -**Usage Example:** -\`\`\`json -{ - "name": "firecrawl_deep_research", - "arguments": { - "query": "What are the environmental impacts of electric vehicles compared to gasoline vehicles?", - "maxDepth": 3, - "timeLimit": 120, - "maxUrls": 50 - } -} -\`\`\` -**Returns:** Final analysis generated by an LLM based on research. (data.finalAnalysis); may also include structured activities and sources used in the research process. -`, - inputSchema: { - type: 'object', - properties: { - query: { - type: 'string', - description: 'The query to research', - }, - maxDepth: { - type: 'number', - description: 'Maximum depth of research iterations (1-10)', - }, - timeLimit: { - type: 'number', - description: 'Time limit in seconds (30-300)', - }, - maxUrls: { - type: 'number', - description: 'Maximum number of URLs to analyze (1-1000)', - }, - }, - required: ['query'], - }, -}; - -const GENERATE_LLMSTXT_TOOL: Tool = { - name: 'firecrawl_generate_llmstxt', - description: ` -Generate a standardized llms.txt (and optionally llms-full.txt) file for a given domain. This file defines how large language models should interact with the site. - -**Best for:** Creating machine-readable permission guidelines for AI models. -**Not recommended for:** General content extraction or research. -**Arguments:** -- url (string, required): The base URL of the website to analyze. -- maxUrls (number, optional): Max number of URLs to include (default: 10). -- showFullText (boolean, optional): Whether to include llms-full.txt contents in the response. -**Prompt Example:** "Generate an LLMs.txt file for example.com." -**Usage Example:** -\`\`\`json -{ - "name": "firecrawl_generate_llmstxt", - "arguments": { - "url": "https://example.com", - "maxUrls": 20, - "showFullText": true - } -} -\`\`\` -**Returns:** LLMs.txt file contents (and optionally llms-full.txt). -`, - inputSchema: { - type: 'object', - properties: { - url: { - type: 'string', - description: 'The URL to generate LLMs.txt from', - }, - maxUrls: { - type: 'number', - description: 'Maximum number of URLs to process (1-100, default: 10)', - }, - showFullText: { - type: 'boolean', - description: 'Whether to show the full LLMs-full.txt in the response', - }, - }, - required: ['url'], - }, -}; - /** * Parameters for LLMs.txt generation operations. 
*/ @@ -1108,8 +1017,6 @@ server.setRequestHandler(ListToolsRequestSchema, async () => ({ CHECK_CRAWL_STATUS_TOOL, SEARCH_TOOL, EXTRACT_TOOL, - DEEP_RESEARCH_TOOL, - GENERATE_LLMSTXT_TOOL, ], })); @@ -1192,6 +1099,12 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { if (hasFormat('json') && (response as any).json) { contentParts.push(JSON.stringify((response as any).json, null, 2)); } + if (hasFormat('changeTracking') && (response as any).changeTracking) { + contentParts.push(JSON.stringify((response as any).changeTracking, null, 2)); + } + if (hasFormat('summary') && (response as any).summary) { + contentParts.push(JSON.stringify((response as any).summary, null, 2)); + } // If options.formats is empty, default to markdown if (!options.formats || options.formats.length === 0) { @@ -1240,7 +1153,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { } return { content: [ - { type: 'text', text: trimResponseText(response.links.join('\n')) }, + { type: 'text', text: trimResponseText(JSON.stringify(response.links, null, 2)) }, ], isError: false, }; From c02bba4d5fca31b02170983491eb1c6f76584c28 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Fri, 22 Aug 2025 21:08:08 -0700 Subject: [PATCH 04/13] Update index.ts --- src/index.ts | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/index.ts b/src/index.ts index 7f2fd4e..1c17d03 100644 --- a/src/index.ts +++ b/src/index.ts @@ -635,7 +635,6 @@ Extract structured information from web pages using LLM capabilities. Supports b **Arguments:** - urls: Array of URLs to extract information from - prompt: Custom prompt for the LLM extraction -- systemPrompt: System prompt to guide the LLM - schema: JSON schema for structured data extraction - allowExternalLinks: Allow extraction from external links - enableWebSearch: Enable web search for additional context @@ -648,7 +647,6 @@ Extract structured information from web pages using LLM capabilities. Supports b "arguments": { "urls": ["https://example.com/page1", "https://example.com/page2"], "prompt": "Extract product information including name, price, and description", - "systemPrompt": "You are a helpful assistant that extracts product information", "schema": { "type": "object", "properties": { @@ -678,10 +676,6 @@ Extract structured information from web pages using LLM capabilities. 
Supports b type: 'string', description: 'Prompt for the LLM extraction', }, - systemPrompt: { - type: 'string', - description: 'System prompt for LLM extraction', - }, schema: { type: 'object', description: 'JSON schema for structured data extraction', @@ -786,7 +780,6 @@ interface SearchOptions { // Add after other interfaces interface ExtractParams { prompt?: string; - systemPrompt?: string; schema?: T | object; allowExternalLinks?: boolean; enableWebSearch?: boolean; @@ -797,7 +790,6 @@ interface ExtractParams { interface ExtractArgs { urls: string[]; prompt?: string; - systemPrompt?: string; schema?: object; allowExternalLinks?: boolean; enableWebSearch?: boolean; @@ -1264,7 +1256,6 @@ ${ client.extract({ urls: args.urls, prompt: args.prompt, - systemPrompt: args.systemPrompt, schema: args.schema, allowExternalLinks: args.allowExternalLinks, enableWebSearch: args.enableWebSearch, From 02bfe36133491dda907afdaac8fee88b828624b4 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Fri, 22 Aug 2025 21:09:24 -0700 Subject: [PATCH 05/13] Update index.ts --- src/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/index.ts b/src/index.ts index 1c17d03..901f93c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -489,10 +489,10 @@ Check the status of a crawl job. const SEARCH_TOOL: Tool = { name: 'firecrawl_search', description: ` -Search the web and optionally extract content from search results. This is the most powerful search tool available, and if available you should always default to using this tool for any web search needs. +Search the web and optionally extract content from search results. This is the most powerful web search tool available, and if available you should always default to using this tool for any web search needs. **Best for:** Finding specific information across multiple websites, when you don't know which website has the information; when you need the most relevant content for a query. -**Not recommended for:** When you already know which website to scrape (use scrape); when you need comprehensive coverage of a single website (use map or crawl). +**Not recommended for:** When you need to search the filesystem. When you already know which website to scrape (use scrape); when you need comprehensive coverage of a single website (use map or crawl. **Common mistakes:** Using crawl or map for open-ended questions (use search instead). **Prompt Example:** "Find the latest research papers on AI published in 2023." **Sources:** web, images, news, default to web unless needed images or news. 
From f9085135cb45dfca76d0bb5621343976df9ff2a5 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Fri, 22 Aug 2025 21:10:58 -0700 Subject: [PATCH 06/13] Update index.ts --- src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index 901f93c..47265b4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -40,7 +40,7 @@ This is the most powerful, fastest and most reliable scraper tool, if available "arguments": { "url": "https://example.com", "formats": ["markdown"], - "maxAge": 3600000 + "maxAge": 172800000 } } \`\`\` From 043a6b13b0e7ec129039d47a786baf92592ddf80 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Sat, 23 Aug 2025 11:19:49 -0700 Subject: [PATCH 07/13] Nick: --- README.md | 76 ---------------------------------------------------- package.json | 5 ++-- src/index.ts | 20 +++++++------- 3 files changed, 13 insertions(+), 88 deletions(-) diff --git a/README.md b/README.md index 949fcfc..b2e4813 100644 --- a/README.md +++ b/README.md @@ -311,8 +311,6 @@ Use this guide to select the right tool for your task: - **If you want to search the web for info:** use **search** - **If you want to extract structured data:** use **extract** - **If you want to analyze a whole site or section:** use **crawl** (with limits!) -- **If you want to do in-depth research:** use **deep_research** -- **If you want to generate LLMs.txt:** use **generate_llmstxt** ### Quick Reference Table @@ -324,8 +322,6 @@ Use this guide to select the right tool for your task: | crawl | Multi-page extraction (with limits) | markdown/html[] | | search | Web search for info | results[] | | extract | Structured data from pages | JSON | -| deep_research | In-depth, multi-source research | summary, sources| -| generate_llmstxt | LLMs.txt for a domain | text | ## Available Tools @@ -629,78 +625,6 @@ When using a self-hosted instance, the extraction will use your configured LLM. } ``` -### 9. Deep Research Tool (`firecrawl_deep_research`) - -Conduct deep web research on a query using intelligent crawling, search, and LLM analysis. - -**Best for:** -- Complex research questions requiring multiple sources, in-depth analysis. - -**Not recommended for:** -- Simple questions that can be answered with a single search -- When you need very specific information from a known page (use scrape) -- When you need results quickly (deep research can take time) - -**Arguments:** -- query (string, required): The research question or topic to explore. -- maxDepth (number, optional): Maximum recursive depth for crawling/search (default: 3). -- timeLimit (number, optional): Time limit in seconds for the research session (default: 120). -- maxUrls (number, optional): Maximum number of URLs to analyze (default: 50). - -**Prompt Example:** -> "Research the environmental impact of electric vehicles versus gasoline vehicles." - -**Usage Example:** -```json -{ - "name": "firecrawl_deep_research", - "arguments": { - "query": "What are the environmental impacts of electric vehicles compared to gasoline vehicles?", - "maxDepth": 3, - "timeLimit": 120, - "maxUrls": 50 - } -} -``` - -**Returns:** -- Final analysis generated by an LLM based on research. (data.finalAnalysis) -- May also include structured activities and sources used in the research process. - -### 10. Generate LLMs.txt Tool (`firecrawl_generate_llmstxt`) - -Generate a standardized llms.txt (and optionally llms-full.txt) file for a given domain. This file defines how large language models should interact -with the site. 
- -**Best for:** -- Creating machine-readable permission guidelines for AI models. - -**Not recommended for:** -- General content extraction or research - -**Arguments:** -- url (string, required): The base URL of the website to analyze. -- maxUrls (number, optional): Max number of URLs to include (default: 10). -- showFullText (boolean, optional): Whether to include llms-full.txt contents in the response. - -**Prompt Example:** -> "Generate an LLMs.txt file for example.com." - -**Usage Example:** -```json -{ - "name": "firecrawl_generate_llmstxt", - "arguments": { - "url": "https://example.com", - "maxUrls": 20, - "showFullText": true - } -} -``` - -**Returns:** -- LLMs.txt file contents (and optionally llms-full.txt) - ## Logging System The server includes comprehensive logging: diff --git a/package.json b/package.json index 430881c..37313f4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "firecrawl-mcp", - "version": "2.0.0", + "version": "2.0.1", "description": "MCP server for Firecrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, batch processing, structured data extraction, and LLM-powered content analysis.", "type": "module", "bin": { @@ -20,7 +20,8 @@ "lint:fix": "eslint src/**/*.ts --fix", "format": "prettier --write .", "prepare": "npm run build", - "publish": "npm run build && npm publish" + "publish": "npm run build && npm publish", + "publish-beta": "npm run build && npm publish --tag beta" }, "license": "MIT", "dependencies": { diff --git a/src/index.ts b/src/index.ts index 47265b4..8c746c1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -277,7 +277,7 @@ const CRAWL_TOOL: Tool = { **Best for:** Extracting content from multiple related pages, when you need comprehensive coverage. **Not recommended for:** Extracting content from a single page (use scrape); when token limits are a concern (use map + batch_scrape); when you need fast results (crawling can be slow). **Warning:** Crawl responses can be very large and may exceed token limits. Limit the crawl depth and number of pages, or use map + batch_scrape for better control. - **Common mistakes:** Setting limit or maxDiscoveryDepth too high (causes token overflow); using crawl for a single page (use scrape instead). + **Common mistakes:** Setting limit or maxDiscoveryDepth too high (causes token overflow) or too low (causes missing pages); using crawl for a single page (use scrape instead). Using a /* wildcard is not recommended. **Prompt Example:** "Get all blog posts from the first two levels of example.com/blog." **Usage Example:** \`\`\`json @@ -552,15 +552,15 @@ Search the web and optionally extract content from search results. 
This is the m type: 'object', properties: { type: { type: 'string', enum: ['web'] }, - tbs: { - type: 'string', - description: - 'Time-based search parameter (e.g., qdr:h, qdr:d, qdr:w, qdr:m, qdr:y or custom cdr with cd_min/cd_max)', - }, - location: { - type: 'string', - description: 'Location parameter for search results', - }, + // tbs: { + // type: 'string', + // description: + // 'Time-based search parameter (e.g., qdr:h, qdr:d, qdr:w, qdr:m, qdr:y or custom cdr with cd_min/cd_max)', + // }, + // location: { + // type: 'string', + // description: 'Location parameter for search results', + // }, }, required: ['type'], additionalProperties: false, From cbf598a121b662c3f58ed9140b2f1aba223ea6e0 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Sat, 23 Aug 2025 11:37:50 -0700 Subject: [PATCH 08/13] Update index.ts --- src/index.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/index.ts b/src/index.ts index 8c746c1..2f1eaf5 100644 --- a/src/index.ts +++ b/src/index.ts @@ -68,6 +68,7 @@ This is the most powerful, fastest and most reliable scraper tool, if available 'links', 'extract', 'summary', + 'changeTracking', ], }, { @@ -285,8 +286,8 @@ const CRAWL_TOOL: Tool = { "name": "firecrawl_crawl", "arguments": { "url": "https://example.com/blog/*", - "maxDiscoveryDepth": 2, - "limit": 100, + "maxDiscoveryDepth": 5, + "limit": 20, "allowExternalLinks": false, "deduplicateSimilarURLs": true, "sitemap": "include" From 14076c13be5cf1970887742928c314591079b564 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Sat, 23 Aug 2025 11:40:06 -0700 Subject: [PATCH 09/13] Update index.ts --- src/index.ts | 54 ---------------------------------------------------- 1 file changed, 54 deletions(-) diff --git a/src/index.ts b/src/index.ts index 2f1eaf5..066e951 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1328,60 +1328,6 @@ ${ } } - case 'firecrawl_generate_llmstxt': { - if (!isGenerateLLMsTextOptions(args)) { - throw new Error('Invalid arguments for firecrawl_generate_llmstxt'); - } - - try { - const { url, ...params } = args; - const generateStartTime = Date.now(); - - safeLog('info', `Starting LLMs.txt generation for URL: ${url}`); - - // Start the generation process - const response = await withRetry( - async () => - // @ts-expect-error Extended API options including origin - client.generateLLMsText(url, { ...params, origin: 'mcp-server' }), - 'LLMs.txt generation' - ); - - if (!response.success) { - throw new Error(response.error || 'LLMs.txt generation failed'); - } - - // Log performance metrics - safeLog( - 'info', - `LLMs.txt generation completed in ${Date.now() - generateStartTime}ms` - ); - - // Format the response - let resultText = ''; - - if ('data' in response) { - resultText = `LLMs.txt content:\n\n${response.data.llmstxt}`; - - if (args.showFullText && response.data.llmsfulltxt) { - resultText += `\n\nLLMs-full.txt content:\n\n${response.data.llmsfulltxt}`; - } - } - - return { - content: [{ type: 'text', text: trimResponseText(resultText) }], - isError: false, - }; - } catch (error) { - const errorMessage = - error instanceof Error ? 
error.message : String(error); - return { - content: [{ type: 'text', text: trimResponseText(errorMessage) }], - isError: true, - }; - } - } - default: return { content: [ From 23afb5c17163c82776e3c2dd4083257cf2603261 Mon Sep 17 00:00:00 2001 From: Nicolas Date: Sat, 23 Aug 2025 11:40:21 -0700 Subject: [PATCH 10/13] Update index.ts --- src/index.ts | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/index.ts b/src/index.ts index 066e951..0440809 100644 --- a/src/index.ts +++ b/src/index.ts @@ -248,9 +248,10 @@ Map a website to discover all indexed URLs on the site. sitemap: { type: 'string', enum: ['include', 'skip', 'only'], - description: 'Sitemap handling: "include" - use sitemap + find other pages (default), "skip" - ignore sitemap completely, "only" - only return sitemap URLs', + description: + 'Sitemap handling: "include" - use sitemap + find other pages (default), "skip" - ignore sitemap completely, "only" - only return sitemap URLs', }, - + includeSubdomains: { type: 'boolean', description: 'Include URLs from subdomains in results', @@ -1093,10 +1094,14 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { contentParts.push(JSON.stringify((response as any).json, null, 2)); } if (hasFormat('changeTracking') && (response as any).changeTracking) { - contentParts.push(JSON.stringify((response as any).changeTracking, null, 2)); + contentParts.push( + JSON.stringify((response as any).changeTracking, null, 2) + ); } if (hasFormat('summary') && (response as any).summary) { - contentParts.push(JSON.stringify((response as any).summary, null, 2)); + contentParts.push( + JSON.stringify((response as any).summary, null, 2) + ); } // If options.formats is empty, default to markdown @@ -1146,7 +1151,10 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => { } return { content: [ - { type: 'text', text: trimResponseText(JSON.stringify(response.links, null, 2)) }, + { + type: 'text', + text: trimResponseText(JSON.stringify(response.links, null, 2)), + }, ], isError: false, }; From 70741de5f9f5bc2d8d811b186bac7bdac220ca8f Mon Sep 17 00:00:00 2001 From: Nicolas Date: Sat, 23 Aug 2025 11:42:14 -0700 Subject: [PATCH 11/13] Update index.ts --- src/index.ts | 49 +++++++++++++++++++------------------------------ 1 file changed, 19 insertions(+), 30 deletions(-) diff --git a/src/index.ts b/src/index.ts index 0440809..c6577a3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -699,25 +699,25 @@ Extract structured information from web pages using LLM capabilities. Supports b }, }; -/** - * Parameters for LLMs.txt generation operations. - */ -interface GenerateLLMsTextParams { - /** - * Maximum number of URLs to process (1-100) - * @default 10 - */ - maxUrls?: number; - /** - * Whether to show the full LLMs-full.txt in the response - * @default false - */ - showFullText?: boolean; - /** - * Experimental flag for streaming - */ - __experimental_stream?: boolean; -} +// /** +// * Parameters for LLMs.txt generation operations. +// */ +// interface GenerateLLMsTextParams { +// /** +// * Maximum number of URLs to process (1-100) +// * @default 10 +// */ +// maxUrls?: number; +// /** +// * Whether to show the full LLMs-full.txt in the response +// * @default false +// */ +// showFullText?: boolean; +// /** +// * Experimental flag for streaming +// */ +// __experimental_stream?: boolean; +// } /** * Response interface for LLMs.txt generation operations. 
@@ -865,17 +865,6 @@ function isExtractOptions(args: unknown): args is ExtractArgs {
   );
 }
 
-function isGenerateLLMsTextOptions(
-  args: unknown
-): args is { url: string } & Partial<GenerateLLMsTextParams> {
-  return (
-    typeof args === 'object' &&
-    args !== null &&
-    'url' in args &&
-    typeof (args as { url: unknown }).url === 'string'
-  );
-}
-
 function removeEmptyTopLevel<T extends Record<string, unknown>>(
   obj: T
 ): Partial<T> {

From 65fcc217033db04575806785a242ce09ef8d8a87 Mon Sep 17 00:00:00 2001
From: Nicolas
Date: Sat, 23 Aug 2025 11:59:59 -0700
Subject: [PATCH 12/13] Nick:

---
 .github/workflows/ci.yml | 3 ---
 package.json             | 2 +-
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 619d05e..f14693f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -27,6 +27,3 @@ jobs:
 
       - name: Lint
         run: npm run lint
-
-      - name: Test
-        run: npm test
diff --git a/package.json b/package.json
index 37313f4..e87b051 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "firecrawl-mcp",
-  "version": "2.0.1",
+  "version": "2.0.2",
   "description": "MCP server for Firecrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, batch processing, structured data extraction, and LLM-powered content analysis.",
   "type": "module",
   "bin": {

From 0f28ac39a107067c2797185d4d53ee6c0c34487a Mon Sep 17 00:00:00 2001
From: Nicolas
Date: Sat, 23 Aug 2025 12:05:28 -0700
Subject: [PATCH 13/13] Update package.json

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index e87b051..ed5c977 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "firecrawl-mcp",
   "version": "2.0.2",
-  "description": "MCP server for Firecrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, batch processing, structured data extraction, and LLM-powered content analysis.",
+  "description": "MCP server for Firecrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, search, batch processing, structured data extraction, and LLM-powered content analysis.",
   "type": "module",
   "bin": {
     "firecrawl-mcp": "dist/index.js"