diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..1e7a87a6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,45 @@
+---
+name: Bug Report
+about: Create a report to help us improve the LinkedIn MCP server
+title: '[BUG] '
+labels: ['bug']
+assignees: ''
+
+---
+
+## Installation Method
+- [ ] Docker (specify docker image version/tag): _._._
+- [ ] Claude Desktop DXT extension (specify DXT version): _._._
+- [ ] Local Python setup
+
+## When does the error occur?
+- [ ] At startup
+- [ ] During tool call (specify which tool):
+ - [ ] get_person_profile
+ - [ ] get_company_profile
+ - [ ] get_job_details
+ - [ ] get_saved_jobs
+ - [ ] search_jobs
+ - [ ] close_session
+
+## MCP Client Configuration
+
+**Claude Desktop Config** (`/Users/[username]/Library/Application Support/Claude/claude_desktop_config.json`):
+```json
+{
+ "mcpServers": {
+ "linkedin": {
+ // Your configuration here (remove sensitive credentials)
+ }
+ }
+}
+```
+
+## MCP Client Logs
+**Claude Desktop Logs** (`/Users/[username]/Library/Logs/Claude/mcp-server-LinkedIn MCP Server.log`):
+```
+Paste relevant log entries here
+```
+
+## Error Description
+What went wrong and what did you expect to happen?
diff --git a/.github/ISSUE_TEMPLATE/documentation_issue.md b/.github/ISSUE_TEMPLATE/documentation_issue.md
new file mode 100644
index 00000000..816b05a3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation_issue.md
@@ -0,0 +1,50 @@
+---
+name: Documentation Issue
+about: Report problems with README, setup instructions, or other documentation
+title: '[DOCS] '
+labels: ['documentation']
+assignees: ''
+
+---
+
+## Documentation Problem
+**What documentation issue did you find?**
+- [ ] Incorrect/outdated setup instructions
+- [ ] Missing information
+- [ ] Unclear/confusing explanations
+- [ ] Broken links
+- [ ] Example code doesn't work
+- [ ] Missing prerequisites
+- [ ] Inconsistent information
+- [ ] Typos/grammar issues
+- [ ] Other: ___________
+
+## Location
+**Where is the documentation issue?**
+- [ ] README.md
+- [ ] Code comments
+- [ ] Error messages
+- [ ] CLI help text
+- [ ] Other: ___________
+
+**Specific section/line:**
+___________
+
+## Current Documentation
+**What does the documentation currently say?**
+```
+Paste the current text or link to the specific section
+```
+
+## Problem Description
+**What's wrong or confusing about it?**
+A clear description of why this documentation is problematic.
+
+## Suggested Fix
+**What should it say instead?**
+```
+Suggested replacement text or improvements
+```
+
+## Additional Context
+Add any other context, screenshots, or examples that would help improve the documentation.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..31328c57
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature Request
+about: Suggest an idea for the LinkedIn MCP server
+title: '[FEATURE] '
+labels: ['enhancement']
+assignees: ''
+
+---
+
+## Feature description
+Describe what you want to happen.
+
+## Use case
+Why this feature is useful.
+
+## Suggested implementation
+If you have a specific idea for how to implement this feature, please describe it here.
+
+## Additional context
+Add any other details that would help.
diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml
new file mode 100644
index 00000000..c704cda5
--- /dev/null
+++ b/.github/workflows/claude.yml
@@ -0,0 +1,49 @@
+name: Claude Code
+
+on:
+ issue_comment:
+ types: [created]
+ pull_request_review_comment:
+ types: [created]
+ issues:
+ types: [opened, assigned]
+ pull_request_review:
+ types: [submitted]
+
+jobs:
+ claude:
+ if: |
+ (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
+ (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
+ (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
+ (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ pull-requests: read
+ issues: read
+ id-token: write
+ actions: read # Required for Claude to read CI results on PRs
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
+ with:
+ fetch-depth: 1
+
+ - name: Run Claude Code
+ id: claude
+ uses: anthropics/claude-code-action@68cfeead1890300cc87935dbe2c023825be87b8a # v1
+ with:
+ claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+
+ # This is an optional setting that allows Claude to read CI results on PRs
+ additional_permissions: |
+ actions: read
+
+ # Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
+ # prompt: 'Update the pull request description to include a summary of changes.'
+
+ # Optional: Add claude_args to customize behavior and configuration
+ # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
+ # or https://code.claude.com/docs/en/cli-reference for available options
+ # claude_args: '--allowed-tools Bash(gh pr:*)'
diff --git a/CLAUDE.md b/CLAUDE.md
index 47dc3e3d..c3170642 120000
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -1 +1 @@
-AGENTS.md
\ No newline at end of file
+AGENTS.md
diff --git a/README.md b/README.md
index 4622ed71..30107a32 100644
--- a/README.md
+++ b/README.md
@@ -44,6 +44,7 @@ Through this LinkedIn MCP server, AI assistants like Claude can connect to your
| `search_jobs` | Search for jobs with keywords and location filters | working |
| `search_people` | Search for people by keywords and location | working |
| `get_job_details` | Get detailed information about a specific job posting | working |
+| `get_saved_jobs` | Get saved/bookmarked jobs from your LinkedIn job tracker | [#330](https://github.com/stickerdaniel/linkedin-mcp-server/issues/330) |
| `close_session` | Close browser session and clean up resources | working |
diff --git a/docs/docker-hub.md b/docs/docker-hub.md
index d1d4379d..07a77290 100644
--- a/docs/docker-hub.md
+++ b/docs/docker-hub.md
@@ -9,6 +9,7 @@ A Model Context Protocol (MCP) server that connects AI assistants to LinkedIn. A
- **Company Profiles**: Extract comprehensive company data
- **Job Details**: Retrieve job posting information
- **Job Search**: Search for jobs with keywords and location filters
+- **Saved Jobs**: Get saved/bookmarked jobs from your LinkedIn job tracker
- **People Search**: Search for people by keywords and location
- **Person Posts**: Get recent activity/posts from a person's profile
- **Company Posts**: Get recent posts from a company's LinkedIn feed
diff --git a/linkedin_mcp_server/scraping/extractor.py b/linkedin_mcp_server/scraping/extractor.py
index 53fd2242..8fc59834 100644
--- a/linkedin_mcp_server/scraping/extractor.py
+++ b/linkedin_mcp_server/scraping/extractor.py
@@ -6,6 +6,7 @@
from dataclasses import dataclass
import logging
import re
+from collections.abc import Awaitable, Callable
from typing import TYPE_CHECKING, Any, Literal
from urllib.parse import parse_qs, quote_plus, urljoin, urlparse
@@ -2129,6 +2130,163 @@ async def search_jobs(
result["section_errors"] = section_errors
return result
+ _EXTRACT_JOB_IDS_JS = """() => {
+ const seen = new Set();
+ const ids = [];
+ document.querySelectorAll('a[href*="/jobs/view/"]').forEach(a => {
+ const match = a.href.match(/\\/jobs\\/view\\/(\\d+)/);
+ if (match && !seen.has(match[1])) { seen.add(match[1]); ids.push(match[1]); }
+ });
+ return ids;
+ }"""
+
+ _EXTRACT_MAIN_TEXT_JS = """() => {
+ const main = document.querySelector('main');
+ return main ? main.innerText : document.body.innerText;
+ }"""
+
+ _EXTRACT_MAX_PAGE_JS = """() => {
+ const pageNumbers = Array.from(
+ document.querySelectorAll('button[aria-label^="Page "]')
+ )
+ .map(button => {
+ const label = button.getAttribute('aria-label') || '';
+ const match = label.match(/^Page (\\d+)$/);
+ return match ? Number(match[1]) : null;
+ })
+ .filter(page => Number.isInteger(page));
+ return pageNumbers.length ? Math.max(...pageNumbers) : 1;
+ }"""
+
+ async def scrape_saved_jobs(
+ self,
+ max_pages: int = 10,
+ on_progress: Callable[[int, int, str], Awaitable[None]] | None = None,
+ ) -> dict[str, Any]:
+ """Scrape the user's saved/bookmarked jobs from the jobs tracker page.
+
+ Automatically paginates through all pages using numbered page buttons.
+    Extracts job IDs from link hrefs (``/jobs/view/<id>/``) since they are
+ not present in the page's innerText.
+
+ Args:
+ max_pages: Safety cap on pages to scrape (default 10).
+ on_progress: Optional async callback ``(page, total, message)``
+ invoked after each page is scraped.
+
+ Returns:
+ {url, sections: {name: text}, job_ids: list[str]}
+ """
+ url = "https://www.linkedin.com/jobs-tracker/"
+ extracted = await self.extract_page(url, "saved_jobs")
+
+ all_text_parts: list[str] = []
+ all_job_ids: list[str] = []
+
+ if extracted.text:
+ all_text_parts.append(extracted.text)
+
+ # Collect job IDs from page 1.
+ page_ids: list[str] = await self._page.evaluate(self._EXTRACT_JOB_IDS_JS)
+ all_job_ids.extend(page_ids)
+ logger.info("Page 1: found %d job IDs", len(page_ids))
+
+ # LinkedIn can render a sliding window of numbered buttons, so refresh
+ # the largest visible page number as pagination advances.
+ total_pages = min(
+ max(await self._page.evaluate(self._EXTRACT_MAX_PAGE_JS), 1),
+ max_pages,
+ )
+ logger.info("Total pages detected: %d", total_pages)
+
+ if on_progress:
+ await on_progress(1, total_pages, "Fetched saved jobs page 1")
+
+ # Paginate through remaining pages using numbered page buttons.
+ for page_num in range(2, max_pages + 1):
+ page_btn = self._page.locator(f'button[aria-label="Page {page_num}"]')
+ if not await page_btn.count():
+ logger.info(
+ "No page %d button found — stopping at page %d",
+ page_num,
+ page_num - 1,
+ )
+ break
+
+ logger.info("Navigating to saved jobs page %d", page_num)
+ prev_ids = set(all_job_ids)
+ await page_btn.scroll_into_view_if_needed()
+ await page_btn.click()
+ await asyncio.sleep(_NAV_DELAY)
+
+ # Wait for the DOM to reflect new job links.
+ try:
+ await self._page.wait_for_function(
+ """(prevIds) => {
+ const prev = new Set(prevIds);
+ const links = document.querySelectorAll('a[href*="/jobs/view/"]');
+ for (const a of links) {
+ const match = a.href.match(/\\/jobs\\/view\\/(\\d+)/);
+ if (match && !prev.has(match[1])) return true;
+ }
+ return false;
+ }""",
+ arg=list(prev_ids),
+ timeout=15000,
+ )
+ except PlaywrightTimeoutError:
+ logger.info("No new job IDs appeared on page %d — stopping", page_num)
+ break
+
+ await scroll_to_bottom(self._page, pause_time=0.5, max_scrolls=3)
+
+ raw = await self._page.evaluate(self._EXTRACT_MAIN_TEXT_JS)
+ if raw:
+ cleaned = strip_linkedin_noise(raw)
+ if cleaned:
+ all_text_parts.append(cleaned)
+
+ page_ids = await self._page.evaluate(self._EXTRACT_JOB_IDS_JS)
+ new_ids = [jid for jid in page_ids if jid not in prev_ids]
+ logger.info("Page %d: found %d new job IDs", page_num, len(new_ids))
+ if not new_ids:
+ break
+ all_job_ids.extend(new_ids)
+
+ total_pages = max(
+ total_pages,
+ page_num,
+ min(
+ max(await self._page.evaluate(self._EXTRACT_MAX_PAGE_JS), 1),
+ max_pages,
+ ),
+ )
+
+ if on_progress:
+ await on_progress(
+ page_num, total_pages, f"Fetched saved jobs page {page_num}"
+ )
+
+ # Append a summary of job IDs so they are always visible in the text.
+ id_summary = "\n".join(
+ f"- Job ID: {jid} (https://www.linkedin.com/jobs/view/{jid}/)"
+ for jid in all_job_ids
+ )
+ if id_summary:
+ all_text_parts.append(f"--- Saved Job IDs ---\n{id_summary}")
+
+ sections: dict[str, str] = {}
+ if all_text_parts:
+ sections["saved_jobs"] = "\n\n".join(all_text_parts)
+
+ logger.info("Total saved jobs found: %d across all pages", len(all_job_ids))
+
+ return {
+ "url": url,
+ "sections": sections,
+ "job_ids": all_job_ids,
+ }
+
async def search_people(
self,
keywords: str,
diff --git a/linkedin_mcp_server/tools/job.py b/linkedin_mcp_server/tools/job.py
index fe8a14af..afdf9cf9 100644
--- a/linkedin_mcp_server/tools/job.py
+++ b/linkedin_mcp_server/tools/job.py
@@ -68,6 +68,60 @@ async def get_job_details(
except Exception as e:
raise_tool_error(e, "get_job_details") # NoReturn
+ @mcp.tool(
+ timeout=TOOL_TIMEOUT_SECONDS,
+ title="Get Saved Jobs",
+ annotations={"readOnlyHint": True, "openWorldHint": True},
+ tags={"job", "scraping"},
+ exclude_args=["extractor"],
+ )
+ async def get_saved_jobs(
+ ctx: Context,
+ max_pages: Annotated[int, Field(ge=1, le=10)] = 10,
+ extractor: Any | None = None,
+ ) -> dict[str, Any]:
+ """
+ Get the user's saved/bookmarked jobs from LinkedIn's job tracker.
+
+ Args:
+ ctx: FastMCP context for progress reporting
+ max_pages: Maximum number of saved-jobs pages to scrape (1-10, default 10)
+
+ Returns:
+ Dict with url, sections (name -> raw text), and job_ids (list of
+ LinkedIn job ID strings).
+ The LLM should parse the raw text to extract saved job listings.
+ """
+ try:
+ extractor = extractor or await get_ready_extractor(
+ ctx, tool_name="get_saved_jobs"
+ )
+ logger.info("Scraping saved jobs (max_pages=%d)", max_pages)
+
+ await ctx.report_progress(
+ progress=0, total=100, message="Fetching saved jobs"
+ )
+
+ async def _report(page: int, total: int, msg: str) -> None:
+ pct = min(int(page / max(total, 1) * 100), 99)
+ await ctx.report_progress(progress=pct, total=100, message=msg)
+
+ result = await extractor.scrape_saved_jobs(
+ max_pages=max_pages, on_progress=_report
+ )
+
+ await ctx.report_progress(progress=100, total=100, message="Complete")
+
+ return result
+
+ except AuthenticationError as e:
+ try:
+ await handle_auth_error(e, ctx)
+ except Exception as relogin_exc:
+ raise_tool_error(relogin_exc, "get_saved_jobs")
+ except Exception as e:
+ raise_tool_error(e, "get_saved_jobs") # NoReturn
+
@mcp.tool(
timeout=TOOL_TIMEOUT_SECONDS,
title="Search Jobs",
diff --git a/tests/test_scraping.py b/tests/test_scraping.py
index d3b1c335..10fa81e2 100644
--- a/tests/test_scraping.py
+++ b/tests/test_scraping.py
@@ -1790,6 +1790,243 @@ async def test_search_people_omits_orphaned_references(self, mock_page):
assert "references" not in result
+class TestScrapeSavedJobs:
+ async def test_scrape_saved_jobs_single_page(self, mock_page):
+ """Single page of results — no Next button. Progress callback fires."""
+
+ async def evaluate_side_effect(js, *args):
+ if 'button[aria-label^="Page "]' in js:
+ return 1
+ if "jobs/view" in js:
+ return ["111", "222"]
+ return None
+
+ mock_page.evaluate = AsyncMock(side_effect=evaluate_side_effect)
+ mock_next = MagicMock()
+ mock_next.count = AsyncMock(return_value=0)
+ mock_page.locator = MagicMock(return_value=mock_next)
+ on_progress = AsyncMock()
+ extractor = LinkedInExtractor(mock_page)
+ with patch.object(
+ extractor,
+ "extract_page",
+ new_callable=AsyncMock,
+ return_value=extracted("Saved Job 1\nSaved Job 2"),
+ ):
+ result = await extractor.scrape_saved_jobs(on_progress=on_progress)
+
+ assert result["url"] == "https://www.linkedin.com/jobs-tracker/"
+ assert "saved_jobs" in result["sections"]
+ assert "sections_requested" not in result
+ assert result["job_ids"] == ["111", "222"]
+ assert "Job ID: 111" in result["sections"]["saved_jobs"]
+ assert "Job ID: 222" in result["sections"]["saved_jobs"]
+ on_progress.assert_awaited_once_with(1, 1, "Fetched saved jobs page 1")
+
+ async def test_scrape_saved_jobs_passes_saved_jobs_section_name(self, mock_page):
+ async def evaluate_side_effect(js, *args):
+ if 'button[aria-label^="Page "]' in js:
+ return 1
+ if "jobs/view" in js:
+ return ["111", "222"]
+ return None
+
+ mock_page.evaluate = AsyncMock(side_effect=evaluate_side_effect)
+ mock_next = MagicMock()
+ mock_next.count = AsyncMock(return_value=0)
+ mock_page.locator = MagicMock(return_value=mock_next)
+ extractor = LinkedInExtractor(mock_page)
+
+ with patch.object(
+ extractor,
+ "extract_page",
+ new_callable=AsyncMock,
+ return_value=extracted("Saved Job 1\nSaved Job 2"),
+ ) as mock_extract_page:
+ await extractor.scrape_saved_jobs()
+
+ mock_extract_page.assert_awaited_once_with(
+ "https://www.linkedin.com/jobs-tracker/",
+ "saved_jobs",
+ )
+
+ async def test_scrape_saved_jobs_paginates(self, mock_page):
+ """Clicks page buttons, collects IDs, and refreshes progress totals."""
+ # Page 1 returns IDs 111, 222; page 2 returns 333, 444
+ call_count = 0
+
+ async def evaluate_side_effect(js, *args):
+ nonlocal call_count
+ call_count += 1
+ if 'button[aria-label^="Page "]' in js:
+ return 1 if call_count == 2 else 4
+ if "jobs/view" in js:
+                # Calls 1-3 occur while on page 1; later calls return page 2 IDs
+ if call_count <= 3:
+ return ["111", "222"]
+ return ["333", "444"]
+ if "innerText" in js:
+ return "Page 2 jobs"
+ return None
+
+ mock_page.evaluate = AsyncMock(side_effect=evaluate_side_effect)
+
+ # Page button exists for page 2, not for page 3
+ page_btn_click_count = 0
+ mock_page_btn = MagicMock()
+
+ async def page_btn_count():
+ return 1 if page_btn_click_count == 0 else 0
+
+ mock_page_btn.count = AsyncMock(side_effect=page_btn_count)
+ mock_page_btn.scroll_into_view_if_needed = AsyncMock()
+
+ async def page_btn_click():
+ nonlocal page_btn_click_count
+ page_btn_click_count += 1
+
+ mock_page_btn.click = AsyncMock(side_effect=page_btn_click)
+ mock_page.locator = MagicMock(return_value=mock_page_btn)
+ mock_page.wait_for_function = AsyncMock()
+ on_progress = AsyncMock()
+
+ extractor = LinkedInExtractor(mock_page)
+ with (
+ patch.object(
+ extractor,
+ "extract_page",
+ new_callable=AsyncMock,
+ return_value=extracted("Page 1 jobs"),
+ ),
+ patch(
+ "linkedin_mcp_server.scraping.extractor.scroll_to_bottom",
+ new_callable=AsyncMock,
+ ),
+ patch(
+ "linkedin_mcp_server.scraping.extractor.asyncio.sleep",
+ new_callable=AsyncMock,
+ ),
+ ):
+ result = await extractor.scrape_saved_jobs(on_progress=on_progress)
+
+ assert result["job_ids"] == ["111", "222", "333", "444"]
+ assert "Page 1 jobs" in result["sections"]["saved_jobs"]
+ assert "Page 2 jobs" in result["sections"]["saved_jobs"]
+ for jid in ["111", "222", "333", "444"]:
+ assert f"Job ID: {jid}" in result["sections"]["saved_jobs"]
+ # Progress was reported for both pages
+ assert on_progress.await_count == 2
+ assert on_progress.await_args_list[0].args == (
+ 1,
+ 1,
+ "Fetched saved jobs page 1",
+ )
+ assert on_progress.await_args_list[1].args == (
+ 2,
+ 4,
+ "Fetched saved jobs page 2",
+ )
+
+ async def test_scrape_saved_jobs_timeout_stops_gracefully(self, mock_page):
+ """PlaywrightTimeoutError on page 2 returns page 1 results only."""
+ from patchright.async_api import TimeoutError as PlaywrightTimeoutError
+
+ async def evaluate_side_effect(js, *args):
+ if 'button[aria-label^="Page "]' in js:
+ return 2
+ if "jobs/view" in js:
+ return ["111", "222"]
+ return None
+
+ mock_page.evaluate = AsyncMock(side_effect=evaluate_side_effect)
+
+ mock_page_btn = MagicMock()
+ mock_page_btn.count = AsyncMock(return_value=1)
+ mock_page_btn.scroll_into_view_if_needed = AsyncMock()
+ mock_page_btn.click = AsyncMock()
+ mock_page.locator = MagicMock(return_value=mock_page_btn)
+ mock_page.wait_for_function = AsyncMock(
+ side_effect=PlaywrightTimeoutError("Timeout")
+ )
+
+ extractor = LinkedInExtractor(mock_page)
+ with (
+ patch.object(
+ extractor,
+ "extract_page",
+ new_callable=AsyncMock,
+ return_value=extracted("Page 1 jobs"),
+ ),
+ patch(
+ "linkedin_mcp_server.scraping.extractor.asyncio.sleep",
+ new_callable=AsyncMock,
+ ),
+ ):
+ result = await extractor.scrape_saved_jobs()
+
+ assert result["job_ids"] == ["111", "222"]
+ assert "Job ID: 111" in result["sections"]["saved_jobs"]
+ assert "Job ID: 222" in result["sections"]["saved_jobs"]
+
+ async def test_scrape_saved_jobs_stops_at_max_pages_despite_more_buttons(
+ self, mock_page
+ ):
+ """max_pages=1 stops after page 1 even if more buttons exist."""
+
+ async def evaluate_side_effect(js, *args):
+ if 'button[aria-label^="Page "]' in js:
+ return 3
+ if "jobs/view" in js:
+ return ["111", "222"]
+ return None
+
+ mock_page.evaluate = AsyncMock(side_effect=evaluate_side_effect)
+
+ # Simulate page buttons existing (count=3) but max_pages=1
+ mock_page_btn = MagicMock()
+ mock_page_btn.count = AsyncMock(return_value=3)
+ mock_page.locator = MagicMock(return_value=mock_page_btn)
+
+ extractor = LinkedInExtractor(mock_page)
+ with patch.object(
+ extractor,
+ "extract_page",
+ new_callable=AsyncMock,
+ return_value=extracted("Page 1 jobs"),
+ ):
+ result = await extractor.scrape_saved_jobs(max_pages=1)
+
+ assert result["job_ids"] == ["111", "222"]
+ # click should never have been called (loop range(2, 2) is empty)
+ mock_page_btn.click.assert_not_called()
+
+ async def test_scrape_saved_jobs_empty(self, mock_page):
+ async def evaluate_side_effect(js, *args):
+ if 'button[aria-label^="Page "]' in js:
+ return 1
+ if "jobs/view" in js:
+ return []
+ return None
+
+ mock_page.evaluate = AsyncMock(side_effect=evaluate_side_effect)
+ mock_next = MagicMock()
+ mock_next.count = AsyncMock(return_value=0)
+ mock_page.locator = MagicMock(return_value=mock_next)
+ extractor = LinkedInExtractor(mock_page)
+ with patch.object(
+ extractor,
+ "extract_page",
+ new_callable=AsyncMock,
+ return_value=extracted(""),
+ ):
+ result = await extractor.scrape_saved_jobs()
+
+ assert result["url"] == "https://www.linkedin.com/jobs-tracker/"
+ assert result["sections"] == {}
+ assert "sections_requested" not in result
+ assert result["job_ids"] == []
+
+
class TestStripLinkedInNoise:
def test_strips_footer(self):
text = "Bill Gates\nChair, Gates Foundation\n\nAbout\nAccessibility\nTalent Solutions\nCareers"
diff --git a/tests/test_session_state.py b/tests/test_session_state.py
index a077d7cc..fd4d9b01 100644
--- a/tests/test_session_state.py
+++ b/tests/test_session_state.py
@@ -152,7 +152,7 @@ def test_get_runtime_id_marks_container(monkeypatch):
)
monkeypatch.setattr(
"linkedin_mcp_server.session_state.Path.exists",
- lambda self: str(self) == "/.dockerenv",
+ lambda self: self.as_posix() == "/.dockerenv",
)
assert get_runtime_id() == "linux-amd64-container"
@@ -167,7 +167,7 @@ def test_get_runtime_id_marks_container_from_cgroup_v2_mountinfo(monkeypatch):
)
monkeypatch.setattr(
"linkedin_mcp_server.session_state.Path.exists",
- lambda self: str(self) == "/proc/1/mountinfo",
+ lambda self: self.as_posix() == "/proc/1/mountinfo",
)
monkeypatch.setattr(
"linkedin_mcp_server.session_state.Path.read_text",
@@ -189,7 +189,7 @@ def test_get_runtime_id_ignores_non_root_overlay_mounts(monkeypatch):
)
monkeypatch.setattr(
"linkedin_mcp_server.session_state.Path.exists",
- lambda self: str(self) == "/proc/1/mountinfo",
+ lambda self: self.as_posix() == "/proc/1/mountinfo",
)
monkeypatch.setattr(
"linkedin_mcp_server.session_state.Path.read_text",
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 529d551f..56192649 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -28,6 +28,7 @@ def _make_mock_extractor(scrape_result: dict) -> MagicMock:
mock.scrape_job = AsyncMock(return_value=scrape_result)
mock.search_jobs = AsyncMock(return_value=scrape_result)
mock.search_people = AsyncMock(return_value=scrape_result)
+ mock.scrape_saved_jobs = AsyncMock(return_value=scrape_result)
mock.get_sidebar_profiles = AsyncMock(return_value=scrape_result)
mock.get_inbox = AsyncMock(return_value=scrape_result)
mock.get_conversation = AsyncMock(return_value=scrape_result)
@@ -517,6 +518,26 @@ async def test_get_job_details(self, mock_context):
assert "job_posting" in result["sections"]
assert "pages_visited" not in result
+ async def test_get_saved_jobs(self, mock_context):
+ expected = {
+ "url": "https://www.linkedin.com/jobs-tracker/",
+ "sections": {"saved_jobs": "Saved Job 1\nSaved Job 2"},
+ "job_ids": ["111", "222"],
+ }
+ mock_extractor = _make_mock_extractor(expected)
+
+ from linkedin_mcp_server.tools.job import register_job_tools
+
+ mcp = FastMCP("test")
+ register_job_tools(mcp)
+
+ tool_fn = await get_tool_fn(mcp, "get_saved_jobs")
+ result = await tool_fn(mock_context, extractor=mock_extractor)
+ assert "saved_jobs" in result["sections"]
+ assert result["url"] == "https://www.linkedin.com/jobs-tracker/"
+ assert "pages_visited" not in result
+ assert "sections_requested" not in result
+
async def test_search_jobs(self, mock_context):
expected = {
"url": "https://www.linkedin.com/jobs/search/?keywords=python",