Python JA3 Fingerprint Bypass: A Practical Guide

Bypassing JA3 fingerprinting in requests / Scrapy / aiohttp by shuffling the TLS cipher order

Bypassing with requests

import requests
import random
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.ssl_ import create_urllib3_context

ORIGIN_CIPHERS = (
    "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:"
    "DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES"
)


class DESAdapter(HTTPAdapter):
    def __init__(self, *args, **kwargs):
        """
        A TransportAdapter that re-enables 3DES support in Requests.
        """
        CIPHERS = ORIGIN_CIPHERS.split(":")
        random.shuffle(CIPHERS)
        CIPHERS = ":".join(CIPHERS)
        self.CIPHERS = CIPHERS + ":!aNULL:!eNULL:!MD5"
        super().__init__(*args, **kwargs)

    def init_poolmanager(self, *args, **kwargs):
        # Apply the shuffled cipher list to direct connections.
        context = create_urllib3_context(ciphers=self.CIPHERS)
        kwargs["ssl_context"] = context
        return super().init_poolmanager(*args, **kwargs)

    def proxy_manager_for(self, *args, **kwargs):
        # Apply the same cipher list to connections made through a proxy.
        context = create_urllib3_context(ciphers=self.CIPHERS)
        kwargs["ssl_context"] = context
        return super().proxy_manager_for(*args, **kwargs)


s = requests.session()
# s.headers.update(headers)
s.mount("https://example.com", DESAdapter())  # mount on the target site's URL prefix
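
Once the adapter is mounted, every request the session makes to that URL prefix goes out with the shuffled cipher list. A minimal usage sketch (the URL below is a placeholder, not from the original post):

# Assumes the DESAdapter defined above; replace the URL with the target site.
resp = s.get("https://example.com/", timeout=10)
print(resp.status_code)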

Bypassing with Scrapy

Create a custom download handler (e.g. in handler.py):

from scrapy.core.downloader.handlers.http import HTTPDownloadHandler
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
import random
# scrapy.core.downloader.handlers.http.HTTPDownloadHandler


ORIGIN_CIPHERS = (
    "TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:TLS13-AES-128-GCM-SHA256:"
    "ECDH+AESGCM:ECDH+CHACHA20:DH+AESGCM:DH+CHACHA20:ECDH+AES256:DH+AES256:"
    "ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES"
)


def shuffle_ciphers():
    ciphers = ORIGIN_CIPHERS.split(":")
    random.shuffle(ciphers)
    ciphers = ":".join(ciphers)
    return ciphers + ":!aNULL:!MD5:!DSS"



class MyHTTPDownloadHandler(HTTPDownloadHandler):
    def download_request(self, request, spider):
        # Build a freshly shuffled cipher string for every request so the
        # JA3 fingerprint keeps changing between requests.
        tls_ciphers = shuffle_ciphers()
        self._contextFactory = ScrapyClientContextFactory(tls_ciphers=tls_ciphers)
        return super().download_request(request, spider)

Enable this download handler in settings.py:

DOWNLOAD_HANDLERS = {
    'http': 'xxxx.handler.MyHTTPDownloadHandler',
    'https': 'xxxx.handler.MyHTTPDownloadHandler',
}
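
Alternatively, the handler can be enabled for a single spider via custom_settings. A minimal sketch under that assumption: 'xxxx' stays the project-package placeholder from the settings above, and the spider below is hypothetical, used only to print the fingerprint report:

import scrapy


class Ja3CheckSpider(scrapy.Spider):
    # Hypothetical spider used only to verify that the fingerprint changes.
    name = "ja3_check"
    start_urls = ["https://tls.browserleaks.com/json"]

    custom_settings = {
        "DOWNLOAD_HANDLERS": {
            # 'xxxx' is the project package placeholder from the settings above.
            "http": "xxxx.handler.MyHTTPDownloadHandler",
            "https": "xxxx.handler.MyHTTPDownloadHandler",
        },
    }

    def parse(self, response):
        # The body is the JSON report returned by the check site.
        self.logger.info(response.text)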

For more details, see:

https://blog.51cto.com/u_15023263/4862013

Bypassing with aiohttp

import asyncio
import random
import ssl

# ssl._create_default_https_context = ssl._create_unverified_context
import aiohttp

ORIGIN_CIPHERS = ('ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
                  'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES')


class SSLFactory:
    def __init__(self):
        self.ciphers = ORIGIN_CIPHERS.split(":")

    def __call__(self, *args, **kwargs) -> ssl.SSLContext:
        # Build a context with a freshly shuffled cipher order on every call.
        random.shuffle(self.ciphers)
        ciphers = ":".join(self.ciphers)
        ciphers = ciphers + ":!aNULL:!eNULL:!MD5"

        context = ssl.create_default_context()
        context.set_ciphers(ciphers)
        return context


async def main():
    sslgen = SSLFactory()
    async with aiohttp.ClientSession() as session:
        for _ in range(5):
            async with session.get("https://tls.browserleaks.com/json", headers={}, ssl=sslgen()) as response:
                data = await response.json()
            print(data)


if __name__ == '__main__':
    asyncio.run(main())
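
If every request in a session should share a single shuffled fingerprint instead of getting a new one per request, the context can also be attached at the connector level. A minimal sketch under that assumption, reusing the SSLFactory defined above:

async def main_with_connector():
    sslgen = SSLFactory()
    # One shuffled context for the whole session: every request made through
    # this connector presents the same randomized JA3 fingerprint.
    connector = aiohttp.TCPConnector(ssl=sslgen())
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://tls.browserleaks.com/json") as response:
            print(await response.json())

# To run it: asyncio.run(main_with_connector())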

Sites for checking your JA3 fingerprint

https://tls.browserleaks.com/json
https://tls.peet.ws/
https://kawayiyi.com/tls
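
To confirm the bypass works, hit one of these sites twice with fresh sessions and compare the reported hashes. A quick sketch using the requests adapter from the first section (it assumes the JSON report exposes a ja3_hash field):

import requests

for _ in range(2):
    s = requests.Session()
    # DESAdapter is the adapter defined in the requests section above;
    # "ja3_hash" is assumed to be the field name in the JSON report.
    s.mount("https://tls.browserleaks.com", DESAdapter())
    print(s.get("https://tls.browserleaks.com/json").json().get("ja3_hash"))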

Author: kisloy
