Shop-level product data on the 1688 platform is a core input for supply-chain analysis and competitor research, covering wholesale prices, minimum order quantities, category distribution, and other key information. Compared with a single-product API, a whole-shop product interface must solve challenges specific to the task: paginated loading, category filtering, and anti-crawling limits. This article lays out a technical implementation plan for a 1688 whole-shop product interface, focusing on shop ID resolution, multi-page data collection, and precise category filtering, and presents a compliant, efficient architecture that strictly follows platform rules and data-collection norms.
I. 1688 Shop Product Interface Architecture and Compliance Guidelines
1688 shop product data is presented through a layered structure of "shop homepage → product list page → paginated loading". The core interface is the shop product-list pagination endpoint, which supports filtering by category, sales volume, and other dimensions. The implementation should follow these compliance guidelines:
•Request frequency control: keep an interval of ≥15 seconds between page requests to a single shop, and collect from a shop at most 3 times per day
•Data scope limits: collect only public product information; never harvest shop transaction data, customer information, or other private content
•Commercial-use compliance: use the data for market research only, never for malicious competition or commercial disparagement
•Respect for anti-crawling measures: do not forge request headers or break interface encryption; fully mimic normal user browsing behavior
Core technical workflow for whole-shop product collection:

Shop ID resolution → homepage category extraction → pagination parameter construction → distributed request scheduling → data parsing and deduplication → structured storage
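The frequency rules above are easy to codify up front. Below is a minimal, hedged throttle sketch (the class name and structure are illustrative assumptions, not part of any 1688 API) that enforces the ≥15-second page interval and the 3-runs-per-day cap:

```python
import time

class ComplianceThrottle:
    """Illustrative sketch: enforces >= 15 s between page requests
    and at most 3 collection runs per shop per day, per the rules above."""
    def __init__(self, min_interval=15, max_runs_per_day=3):
        self.min_interval = min_interval
        self.max_runs_per_day = max_runs_per_day
        self.last_request = 0.0
        self.runs_today = 0

    def start_run(self):
        """Call once at the start of each collection run."""
        if self.runs_today >= self.max_runs_per_day:
            raise RuntimeError("Daily collection limit for this shop reached")
        self.runs_today += 1

    def wait_for_page(self):
        """Call before every page request to honor the minimum interval."""
        elapsed = time.time() - self.last_request
        if elapsed < self.min_interval:
            time.sleep(self.min_interval - elapsed)
        self.last_request = time.time()
```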
II. Core Technical Implementation
1. Shop ID Parser (handling 1688's URL formats)
1688 shop URLs come in several formats, so the parser must extract the shop's unique identifier (memberId) from each of them, falling back to the page content when the URL alone is not enough:
```python
import re
import requests
from lxml import etree

class AlibabaShopParser:
    """1688 shop info and ID parser"""
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Referer": "https://www.1688.com/"
        }
        # Shop URL matching patterns, most specific first: the generic
        # subdomain pattern must come last or it would shadow the others
        self.shop_patterns = [
            r"https?://shop(\d+)\.1688\.com",  # numeric-ID form: https://shop123456789.1688.com
            r"https?://www\.1688\.com/shop/view_shop\.htm\?memberId=(\w+)",  # standard shop page
            r"https?://(\w+)\.1688\.com"  # generic subdomain form: https://abc123.1688.com
        ]

    def extract_shop_id(self, shop_url):
        """Extract memberId (the shop's unique identifier) from a shop URL"""
        for pattern in self.shop_patterns:
            match = re.search(pattern, shop_url)
            if match:
                return match.group(1)
        # Direct URL parsing failed; fall back to the page content
        return self._extract_id_from_page(shop_url)

    def _extract_id_from_page(self, shop_url):
        """Extract memberId from the shop page content"""
        try:
            response = requests.get(
                shop_url,
                headers=self.headers,
                timeout=15,
                allow_redirects=True
            )
            response.encoding = "utf-8"
            # Try the meta tag first
            tree = etree.HTML(response.text)
            member_id_meta = tree.xpath('//meta[@name="memberId"]/@content')
            if member_id_meta and member_id_meta[0]:
                return member_id_meta[0]
            # Then fall back to inline script tags
            scripts = tree.xpath('//script/text()')
            for script in scripts:
                match = re.search(r'memberId\s*[:=]\s*["\'](\w+)["\']', script)
                if match:
                    return match.group(1)
            return None
        except Exception as e:
            print(f"Failed to extract shop ID from page: {str(e)}")
            return None
```
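A quick usage check of the parser (the shop URLs below are placeholders):

```python
parser = AlibabaShopParser()
# Numeric-ID form resolves directly from the URL, no network access needed
print(parser.extract_shop_id("https://shop123456789.1688.com"))  # -> "123456789"
# Standard shop page resolves from the memberId query parameter
print(parser.extract_shop_id("https://www.1688.com/shop/view_shop.htm?memberId=alitest123"))  # -> "alitest123"
```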
2. Pagination Parameter Generator (matching the B2B pagination logic)
1688 shop listings use a pagination mechanism of their own, where different sort orders and filter conditions map to different parameter rules:
```python
import time
import random
import hashlib

class AlibabaShopProductParamsGenerator:
    """Pagination-parameter generator for 1688 shop product listings"""
    def __init__(self):
        self.base_url = "https://offerlist.1688.com/offerlist.htm"
        # Sort-option mapping
        self.sort_mapping = {
            "default": "",              # default order
            "newest": "create_desc",    # newest first
            "price_asc": "price_asc",   # price low to high
            "price_desc": "price_desc", # price high to low
            "sales": "volume_desc"      # sales volume, high to low
        }

    def generate_params(self, member_id, page=1, sort="default", category_id="", **filters):
        """
        Build the request parameters for a shop product-list page.
        :param member_id: shop memberId
        :param page: page number
        :param sort: sort option
        :param category_id: category ID (empty string means all categories)
        :param filters: optional filters:
            - min_price: minimum price
            - max_price: maximum price
            - is_wholesale: wholesale only (True/False)
        :return: complete parameter dict
        """
        params = {
            "memberId": member_id,
            "pageNum": page,
            "pageSize": 60,  # maximum items per page
            "sortType": self.sort_mapping.get(sort, ""),
            "categoryId": category_id,
            "offline": "false",  # online products only
            "sample": "false",   # exclude samples
            "isNoReload": "true",
            "enableAsync": "true",
            "async": "true",
            "_input_charset": "UTF-8",
            "timestamp": str(int(time.time() * 1000)),
            "rn": str(random.randint(1000000000, 9999999999))
        }
        # Price filters
        if filters.get("min_price"):
            params["priceStart"] = filters["min_price"]
        if filters.get("max_price"):
            params["priceEnd"] = filters["max_price"]
        # Wholesale filter
        if filters.get("is_wholesale"):
            params["wholesale"] = "true"
        # Generate a signature (some endpoints require one)
        if random.random() > 0.5:  # simulate the case where a request must be signed
            params["sign"] = self._generate_sign(params)
        return params

    def _generate_sign(self, params):
        """Placeholder signature: an MD5 over the sorted parameters.
        The real 1688 signing algorithm is not public; replace this with
        the official SDK's signing logic where applicable."""
        raw = "&".join(f"{k}={params[k]}" for k in sorted(params))
        return hashlib.md5(raw.encode("utf-8")).hexdigest()
```
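For example, requesting page 2 of a listing sorted by sales with a price filter (the memberId is a placeholder):

```python
gen = AlibabaShopProductParamsGenerator()
params = gen.generate_params(
    member_id="alitest123",  # placeholder memberId
    page=2,
    sort="sales",
    min_price=10,
    max_price=50
)
# The resulting dict is passed straight to the requester, e.g.:
# html = requester.fetch_page(gen.base_url, params)
```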
3. Request Scheduler (handling B2B anti-crawling limits)
To cope with 1688's strict anti-crawling limits, the scheduler implements session keeping, proxy rotation, and request-interval control:
```python
import time
import random
import requests
from fake_useragent import UserAgent
import urllib3

# Suppress insecure-request warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class AlibabaShopProductRequester:
    """Request scheduler for 1688 shop product pages"""
    def __init__(self, proxy_pool=None):
        self.proxy_pool = proxy_pool or []
        self.ua = UserAgent()
        self.session = self._init_session()
        self.last_request_time = 0
        self.min_interval = 15  # minimum interval between page requests (seconds)
        self.max_retries = 3    # maximum retries per request

    def _init_session(self):
        """Initialize the session and obtain the base cookies"""
        session = requests.Session()
        session.headers.update({
            "User-Agent": self.ua.random,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Referer": "https://www.1688.com/",
            "Upgrade-Insecure-Requests": "1"
        })
        # Visit the 1688 homepage first to pick up the required cookies
        session.get("https://www.1688.com", verify=False, timeout=10)
        return session

    def _control_request_interval(self):
        """Throttle requests to avoid triggering anti-crawling measures"""
        current_time = time.time()
        elapsed = current_time - self.last_request_time
        if elapsed < self.min_interval:
            sleep_time = self.min_interval - elapsed + random.uniform(2, 5)
            print(f"Interval too short; sleeping {sleep_time:.1f} s")
            time.sleep(sleep_time)
        self.last_request_time = time.time()
```
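The excerpt stops before the method that actually performs a page request, which the collectors below rely on. Here is a minimal sketch under stated assumptions (the name `fetch_page` and its retry/proxy details are not shown in the original excerpt):

```python
import random
import requests

def fetch_page(self, url, params=None):
    """Hedged sketch: fetch one page with interval control,
    proxy rotation, and retries. Assumed, not from the excerpt."""
    for attempt in range(1, self.max_retries + 1):
        self._control_request_interval()
        proxy = random.choice(self.proxy_pool) if self.proxy_pool else None
        proxies = {"http": proxy, "https": proxy} if proxy else None
        try:
            resp = self.session.get(url, params=params, proxies=proxies,
                                    verify=False, timeout=15)
            if resp.status_code == 200:
                resp.encoding = "utf-8"
                return resp.text
            print(f"Attempt {attempt}: HTTP {resp.status_code}")
        except requests.RequestException as e:
            print(f"Attempt {attempt} failed: {e}")
    return None

# Attach to the class for illustration
AlibabaShopProductRequester.fetch_page = fetch_page
```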
4. Product Data Parser (extracting B2B-specific fields)
The parser processes product list pages, extracting wholesale prices, minimum order quantities, sales volume, and other B2B-specific fields, and handles the pagination info:
```python
import re
import json
from datetime import datetime
from lxml import etree

class AlibabaShopProductParser:
    """1688 shop product data parser"""
    def __init__(self):
        # Regexes for locating the product data embedded in the page
        self.product_data_pattern = re.compile(r'window\.__page__data__\s*=\s*({.*?});\s*</script>', re.DOTALL)
        self.offer_list_pattern = re.compile(r'offerList\s*:\s*(\[.*?\])', re.DOTALL)

    def parse_products_page(self, html_content):
        """Parse a shop product-list page"""
        if not html_content:
            return None
        # Prefer the embedded JSON data
        json_data = self._extract_json_data(html_content)
        if json_data:
            return self._parse_from_json(json_data)
        # Fall back to parsing the HTML directly
        return self._parse_from_html(html_content)

    def _extract_json_data(self, html_content):
        """Extract embedded JSON data from the page"""
        match = self.product_data_pattern.search(html_content)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                print("Failed to parse embedded JSON data")
        # Try the simplified offer-list payload instead
        match = self.offer_list_pattern.search(html_content)
        if match:
            try:
                return {"offerList": json.loads(match.group(1))}
            except json.JSONDecodeError:
                print("Failed to parse offer-list data")
        return None
```
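The category collector below calls a `remove_duplicates` helper on this parser, and the excerpt likewise omits `_parse_from_json` / `_parse_from_html`. A minimal deduplication sketch, assuming each parsed product dict carries an `offer_id` (or at least a `product_url`) field:

```python
def remove_duplicates(self, products):
    """Hedged sketch: deduplicate products by offer ID.
    The field names `offer_id` / `product_url` are assumptions."""
    seen, unique = set(), []
    for product in products or []:
        key = product.get("offer_id") or product.get("product_url")
        if key and key not in seen:
            seen.add(key)
            unique.append(product)
    return unique

# Attach to the class for illustration
AlibabaShopProductParser.remove_duplicates = remove_duplicates
```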
5. Category Collector (multi-threaded parallel collection)
Built on a thread pool, the collector fetches categories in parallel, improving throughput while keeping resource usage in check:
```python
from concurrent.futures import ThreadPoolExecutor, as_completed

class AlibabaShopCategoryCollector:
    """Category-level collector for shop products"""
    def __init__(self, requester, parser, params_generator):
        self.requester = requester
        self.parser = parser
        self.params_generator = params_generator
        self.max_workers = 2  # concurrency for category collection (keep this low)

    def collect_by_category(self, member_id, categories, max_pages_per_cat=3):
        """
        Collect shop products category by category.
        :param member_id: shop ID
        :param categories: category list (from AlibabaShopParser)
        :param max_pages_per_cat: maximum pages per category
        :return: merged product list
        """
        if not categories:
            print("No category info; cannot collect by category")
            return None
        all_products = []
        category_results = {}
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # Submit one collection task per category
            future_tasks = {}
            for cat in categories:
                future = executor.submit(
                    self._collect_single_category,
                    member_id, cat, max_pages_per_cat
                )
                future_tasks[future] = cat["category_name"]
            # Gather the results as they complete
            for future in as_completed(future_tasks):
                cat_name = future_tasks[future]
                try:
                    result = future.result()
                    if result and result["products"]:
                        category_results[cat_name] = result
                        all_products.extend(result["products"])
                        print(f"Category [{cat_name}] done: {len(result['products'])} products")
                    else:
                        print(f"Category [{cat_name}] failed or empty")
                except Exception as e:
                    print(f"Category [{cat_name}] raised: {str(e)}")
        # Deduplicate, then tag each product with its category
        unique_products = self.parser.remove_duplicates(all_products)
        for product in unique_products:
            for cat_name, cat_data in category_results.items():
                if product in cat_data["products"]:
                    product["category"] = cat_name
                    break
        return {
            "total_products": len(unique_products),
            "category_counts": {k: len(v["products"]) for k, v in category_results.items()},
            "products": unique_products
        }
```
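The excerpt references `_collect_single_category` without showing it. A minimal sketch under the assumptions made so far (categories carry `category_id`/`category_name`, the requester exposes the `fetch_page` sketch from section 3, and the parser returns a dict with a `products` list):

```python
def _collect_single_category(self, member_id, category, max_pages):
    """Hedged sketch: collect up to max_pages of one category.
    Field names and the parser's return shape are assumptions."""
    products = []
    for page in range(1, max_pages + 1):
        params = self.params_generator.generate_params(
            member_id=member_id,
            page=page,
            category_id=category.get("category_id", "")
        )
        html = self.requester.fetch_page(self.params_generator.base_url, params)
        result = self.parser.parse_products_page(html)
        if not result or not result.get("products"):
            break  # no more pages
        products.extend(result["products"])
    return {"category": category.get("category_name"), "products": products}

# Attach to the class for illustration
AlibabaShopCategoryCollector._collect_single_category = _collect_single_category
```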
III. Packaging the Complete Shop Product Collection Service
The components above combine into a complete collection service supporting two modes: full collection and per-category collection:
```python
from datetime import datetime

class AlibabaShopProductService:
    """Complete 1688 shop product collection service"""
    def __init__(self, proxy_pool=None):
        self.shop_parser = AlibabaShopParser()
        self.params_generator = AlibabaShopProductParamsGenerator()
        self.requester = AlibabaShopProductRequester(proxy_pool=proxy_pool)
        self.product_parser = AlibabaShopProductParser()
        self.category_collector = AlibabaShopCategoryCollector(
            self.requester, self.product_parser, self.params_generator
        )

    def collect_shop_products(self, shop_url, max_pages=5, by_category=False, max_pages_per_cat=3):
        """
        Collect all products of a shop.
        :param shop_url: shop URL
        :param max_pages: maximum pages (full-collection mode)
        :param by_category: whether to collect per category
        :param max_pages_per_cat: maximum pages per category
        :return: dict with shop info and the product list
        """
        # 1. Fetch the shop's basic info
        # (get_shop_base_info / get_shop_categories are further
        #  AlibabaShopParser methods not shown in the excerpt above)
        print("Fetching shop info...")
        shop_info = self.shop_parser.get_shop_base_info(shop_url)
        if not shop_info or not shop_info["member_id"]:
            print("Could not get shop info; aborting")
            return None
        member_id = shop_info["member_id"]
        print(f"Shop: {shop_info['shop_name']} (ID: {member_id})")
        # 2. Fetch the shop's categories
        print("Fetching shop categories...")
        categories = self.shop_parser.get_shop_categories(member_id)
        if categories:
            print(f"Found {len(categories)} categories: {[c['category_name'] for c in categories]}")
        else:
            print("No category info available")
            by_category = False  # cannot collect by category
        # 3. Collect the products
        if by_category and categories:
            print("Collecting by category...")
            product_result = self.category_collector.collect_by_category(
                member_id=member_id,
                categories=categories,
                max_pages_per_cat=max_pages_per_cat
            )
        else:
            print("Collecting all products...")
            product_result = self._collect_all_products(
                member_id=member_id,
                max_pages=max_pages
            )
        if not product_result or not product_result["products"]:
            print("No products collected")
            return None
        # 4. Assemble the result
        return {
            "shop_info": shop_info,
            "collection_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "total_products": product_result["total_products"],
            "category_distribution": product_result.get("category_counts", {}),
            "products": product_result["products"]
        }
```
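`_collect_all_products` (full-collection mode) is likewise referenced but not shown. A minimal sketch consistent with the per-category path above, under the same assumptions:

```python
def _collect_all_products(self, member_id, max_pages):
    """Hedged sketch: full-shop collection without category filtering."""
    all_products = []
    for page in range(1, max_pages + 1):
        params = self.params_generator.generate_params(member_id=member_id, page=page)
        html = self.requester.fetch_page(self.params_generator.base_url, params)
        result = self.product_parser.parse_products_page(html)
        if not result or not result.get("products"):
            break  # no more pages
        all_products.extend(result["products"])
    unique = self.product_parser.remove_duplicates(all_products)
    return {"total_products": len(unique), "products": unique}

# Attach to the class for illustration
AlibabaShopProductService._collect_all_products = _collect_all_products
```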
IV. Usage Example, Data Storage and Analysis
1. Basic usage
```python
def main():
    # Proxy pool (replace with working proxies in real use)
    proxy_pool = [
        # "http://123.123.123.123:8080",
        # "http://111.111.111.111:8888"
    ]
    # Initialize the shop product collection service
    service = AlibabaShopProductService(proxy_pool=proxy_pool)
    # Shop URL (replace with a real shop URL)
    shop_url = "https://shop123456789.1688.com"
    # Collect by category, at most 2 pages per category
    result = service.collect_shop_products(
        shop_url=shop_url,
        by_category=True,
        max_pages_per_cat=2
    )
    # Handle the result
    if result:
        print(f"\nDone! Collected {result['total_products']} products")
        # Print the shop info
        print(f"\nShop name: {result['shop_info']['shop_name']}")
        print(f"Main category: {result['shop_info']['main_category']}")
        print(f"Years in operation: {result['shop_info']['operation_years']}")
        print(f"Credit level: {result['shop_info']['credit_level']}")

if __name__ == "__main__":
    main()
```
2. Data storage and analysis tools
```python
import json
import csv           # used by the CSV helper (not all helpers shown here)
import pandas as pd  # used by the analysis helpers
import matplotlib.pyplot as plt
from pathlib import Path
from datetime import datetime

# Configure fonts so Chinese labels render correctly
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

class ShopProductStorageAnalyzer:
    """Storage and analysis tool for shop product data"""
    def __init__(self, storage_dir="./1688_shop_products"):
        self.storage_dir = Path(storage_dir)
        self.storage_dir.mkdir(exist_ok=True, parents=True)

    def save_results(self, result):
        """Persist the collection result"""
        shop_name = result["shop_info"]["shop_name"].replace('/', '_')
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Full result as JSON
        json_path = self.storage_dir / f"{shop_name}_full_{timestamp}.json"
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False, indent=2, default=str)
        # Product list as CSV
        csv_path = self.storage_dir / f"{shop_name}_products_{timestamp}.csv"
        self._save_products_to_csv(result["products"], csv_path)
        print(f"Data saved to:\n- {json_path}\n- {csv_path}")
        return json_path, csv_path

    def analyze_shop_products(self, result):
        """Analyze the shop's product data"""
        if not result or not result["products"]:
            return None
        print("\nAnalyzing shop product data...")
        products = result["products"]
        shop_name = result["shop_info"]["shop_name"]
        # 1. Category distribution
        self._analyze_category_distribution(products, shop_name)
        # 2. Price distribution
        self._analyze_price_distribution(products, shop_name)
        # 3. Minimum order quantity
        self._analyze_min_order(products, shop_name)
        # 4. Sales vs. price
        self._analyze_sales_vs_price(products, shop_name)
        return True
```
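Putting the service and the analyzer together (the shop URL is a placeholder):

```python
# Typical end-to-end usage
service = AlibabaShopProductService()
result = service.collect_shop_products("https://shop123456789.1688.com", by_category=True)
if result:
    analyzer = ShopProductStorageAnalyzer()
    analyzer.save_results(result)        # JSON + CSV on disk
    analyzer.analyze_shop_products(result)  # distribution charts
```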
V. Compliance Optimization and Risk Notes
1. System optimization strategies
•Incremental collection: record the IDs of already-collected products and fetch only new or updated items (a fuller sketch follows the stub below)

```python
def incremental_collect(self, shop_url, last_collected_ids):
    """Incremental collection: fetch new products only"""
    # implementation logic...
    return new_products
```
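A fuller, hedged version of the stub above, assuming each product dict carries an `offer_id` field:

```python
def incremental_collect(self, shop_url, last_collected_ids):
    """Hedged sketch: collect, then keep only offers whose IDs are new.
    The field name `offer_id` is an assumption."""
    result = self.collect_shop_products(shop_url, max_pages=2)
    if not result:
        return []
    known = set(last_collected_ids)
    return [p for p in result["products"] if p.get("offer_id") not in known]

# Attach to the service class for illustration
AlibabaShopProductService.incremental_collect = incremental_collect
```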
•Smart caching: cache shop category info and already-collected products to cut repeat requests (see the cache sketch after this list)
•Distributed collection: for large-scale jobs, use a distributed architecture to spread the load across IPs
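A minimal sketch of the category cache mentioned above: a plain in-memory TTL cache (swap in Redis or similar for production-scale use):

```python
import time

class CategoryCache:
    """Illustrative sketch: cache a shop's category tree for a day
    so repeat runs do not re-fetch it."""
    def __init__(self, ttl_seconds=24 * 3600):
        self.ttl = ttl_seconds
        self._store = {}  # member_id -> (timestamp, categories)

    def get(self, member_id):
        entry = self._store.get(member_id)
        if entry and time.time() - entry[0] < self.ttl:
            return entry[1]
        return None

    def put(self, member_id, categories):
        self._store[member_id] = (time.time(), categories)
```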
2. Compliance and risk notes
•Before any commercial use, obtain written authorization from the 1688 platform and the shop, and comply with China's E-Commerce Law
•Keep per-shop collection frequency low; leave at least 24 hours between repeat collections of the same shop
•Do not use collected shop product data to build products or services that compete with that shop
•Respect the shop's business information; do not abuse the data for price wars or malicious competition
•If anti-crawling measures are triggered, stop collecting immediately and wait at least 48 hours before retrying
With the approach described here, you can build a fully functional whole-shop product collection system for 1688. The design is tuned to B2B e-commerce specifics, supporting per-category collection, product deduplication, and distribution analysis, and provides a solid technical basis for supply-chain analysis and competitor research. In practice, pay particular attention to the platform's strict limits on bulk shop collection and make sure usage stays compliant. If you run into interface-adaptation or anti-crawling issues in your own implementation, you are welcome to discuss and build on the approach presented here.