Lines

INVENTED
WORLDS

Ripper

Sketchfab

Install Tampermonkey in Chrome, then add the script below (Dashboard -> +):

// ==UserScript==
// @name         SketchfabDownloader (Models + Textures)
// @version      0.9.1
// @description  Download Sketchfab models and textures!
// @author       shitposting goddess + 20 Jun 2023 page not loading FIX
// @support      https://discord.gg/ANfE28gPyY
// @include      /^https?://(www\.)?sketchfab\.com/.*
// @require      https://cdnjs.cloudflare.com/ajax/libs/jszip/3.1.5/jszip.min.js
// @require      https://cdnjs.cloudflare.com/ajax/libs/jszip-utils/0.0.2/jszip-utils.min.js
// @require      https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/1.3.8/FileSaver.js
// @run-at       document-start
// @grant        unsafeWindow
// @grant        GM_download
// ==/UserScript==

// Archive that accumulates every exported mesh (.obj) and texture (.png).
var zip = new JSZip();
let folder = zip.folder('collection');

// Set once the download button has been added, so it is never added twice.
var button_dw = false;
// Injection anchors: regexes over the minified Sketchfab viewer source.
// NOTE(review): these match specific mangled identifiers and must be re-derived
// whenever Sketchfab ships a new bundle (see the "20 jun 2023 fix" below).
var func_drawGeometry = /(this\._stateCache\.drawGeometry\(this\._graphicContext,t\))/g;
var fund_drawArrays = /t\.drawArrays\(t\.TRIANGLES,0,6\)/g; // ("fund" typo kept: referenced by this exact name in the patcher)
//var func_renderInto1 = /x\.renderInto\(n,S,y/g;
var func_renderInto1 = /A\.renderInto\(n,E,R/g; //20 jun 2023 fix
var func_renderInto2 = /g\.renderInto=function\(e,i,r/g;
var func_getResourceImage = /getResourceImage:function\(e,t\){/g;

// Unused experimental anchor, kept for reference by the commented-out patch below.
var func_test = /apply:function\(e\){var t=e instanceof r\.Geometry;/g

// Assigned inside the IIFE below; declared here so the patcher can schedule it.
var addbtnfunc;

(function() {
    'use strict';
    var window = unsafeWindow;
    console.log("[UserScript]init", window);


    window.allmodel = [];
    var saveimagecache2 = {};
    var objects = {};


    // Remember a texture URL -> file-name mapping; the first sighting wins.
    var saveimage_to_list = function(url, file_name)
    {
        if (saveimagecache2[url]) {
            return; // already tracked
        }
        saveimagecache2[url] = { name: file_name };
    }

    // Attach the download button to the viewer's title bar, retrying every 3 s
    // until the title bar exists in the DOM.
    addbtnfunc = function() {
        var titlebar = document.evaluate("//div[@class='titlebar']", document, null, 9, null).singleNodeValue;
        if (!titlebar || button_dw) {
            console.log("[UserScript]try add btn later");
            setTimeout(addbtnfunc, 3000);
            return;
        }
        console.log("[UserScript]add btn dwnld");
        var btn = document.createElement("a");
        btn.setAttribute("class", "control");
        btn.innerHTML = "<pre style='font-family:impact;font-size:36px;text-shadow: 1px 1px black;color:#1caad9;'>CLICK TO DOWNLOAD</pre>";
        btn.addEventListener('click', () => {
            alert('Zipping files...')})
        btn.addEventListener("click", dodownload, false);
        titlebar.appendChild(btn);
        button_dw = true;
    }

    // Click handler: convert every captured geometry to OBJ text, then zip
    // the whole collection via PackAll().
    var dodownload = function() {
        console.log("[UserScript]download");
        window.allmodel.forEach(function(obj, idx) {
            var mdl = {
                name: "model_" + idx,
                obj: parseobj(obj)
            };
            console.log(mdl);
            dosavefile(mdl);
        })
        PackAll();
    }

    // Add every pending blob to the zip folder and trigger the browser download,
    // named after the model's on-page title.
    var PackAll = function ()
        {
            Object.keys(objects).forEach(function (key) {
                console.log("[UserScript]save file", key);
                folder.file(key, objects[key], { binary: true });
            });

            var file_name = document.getElementsByClassName('model-name__label')[0].textContent;
            folder.generateAsync({ type: "blob" }).then(content => saveAs(content, file_name + ".zip"));
        }

        // Flatten a captured viewer geometry into plain arrays.
        // Returns { vertex, normal, uv, primitives } where normal/uv fall back
        // to [] when the corresponding attribute is absent.
        // Fix: the original ternary chain tested attr.TexCoord2 twice
        // (copy-paste bug); the slot scan below covers TexCoord0..8 once each.
        var parseobj = function(obj)
        {
            console.log("[UserScript]: obj", obj);
            var list = [];
            obj._primitives.forEach(function(p) {
                if(p && p.indices) {
                    list.push({
                        'mode' : p.mode,
                        'indices' : p.indices._elements
                    });
                }
            })

            var attr = obj._attributes;
            // First populated UV set wins; the viewer may use any TexCoord slot.
            var uv = [];
            for (var slot = 0; slot <= 8; ++slot) {
                var tc = attr['TexCoord' + slot];
                if (tc) {
                    uv = tc._elements;
                    break;
                }
            }
            return {
                vertex: attr.Vertex._elements,
                normal: attr.Normal ? attr.Normal._elements : [],
                uv: uv,
                primitives: list,
            };
        }

        // Serialize one parsed model to Wavefront OBJ text and stash the result
        // as a Blob in `objects` under "<name>.obj" for later zipping.
        var dosavefile = function(mdl)
        {
            var obj = mdl.obj;

            //console.log("TEST");
            //console.log(obj);

            var str = '';
            str += 'mtllib ' + mdl.name + '.mtl\n';
            str += 'o ' + mdl.name + '\n';
            // v: vertex positions, 3 floats apiece.
            for (var i = 0; i < obj.vertex.length; i += 3) {
                str += 'v ';
                for (var j = 0; j < 3; ++j) {
                    str += obj.vertex[i + j] + ' ';
                }
                str += '\n';
            }
            // vn: normals, 3 floats apiece (array may be empty).
            for (i = 0; i < obj.normal.length; i += 3) {
                str += 'vn ';
                for (j = 0; j < 3; ++j) {
                    str += obj.normal[i + j] + ' ';
                }
                str += '\n';
            }

            // vt: texture coordinates, 2 floats apiece (array may be empty).
            for (i = 0; i < obj.uv.length; i += 2) {
                str += 'vt ';
                for (j = 0; j < 2; ++j) {
                    str += obj.uv[i + j] + ' ';
                }
                str += '\n';
            }
            //str += 'usemtl ' + mdl.name + '\n';
            str += 's on \n';

            // Only emit vt/vn face references when those blocks were written.
            var vn = obj.normal.length != 0;
            var vt = obj.uv.length != 0;

            for (i = 0; i < obj.primitives.length; ++i) {
                var primitive = obj.primitives[i];
                // NOTE(review): modes 4/5 are presumably WebGL TRIANGLES /
                // TRIANGLE_STRIP enum values — confirm against the viewer.
                if (primitive.mode == 4 || primitive.mode == 5) {
                    var strip = (primitive.mode == 5);
                    // Strips advance one index per triangle; lists advance three.
                    for (j = 0; j + 2 < primitive.indices.length; !strip ? j += 3 : j++) {
                        str += 'f ';
                        var order = [ 0, 1, 2];
                        // Odd strip triangles flip winding to keep a consistent facing.
                        if (strip && (j % 2 == 1)) {
                            order = [ 0, 2, 1];
                        }
                        for (var k = 0; k < 3; ++k)
                        {
                            // OBJ indices are 1-based; v/vt/vn share one index here.
                            var faceNum = primitive.indices[j + order[k]] + 1;
                            str += faceNum;
                            if (vn || vt) {
                                str += '/';
                                if (vt) {
                                    str += faceNum;
                                }
                                if (vn) {
                                    str += '/' + faceNum;
                                }
                            }
                            str += ' ';
                        }
                        str += '\n';
                    }
                }
                else {
                    console.log("[UserScript]dosavefile: unknown primitive mode", primitive);
                }
            }

            str += '\n';

            var objblob = new Blob([str], {type:'text/plain'});

            objects[mdl.name+".obj"] = objblob;
        }


        // Called from the patched drawGeometry (patch_4): record each drawn
        // geometry exactly once, skipping viewer-internal helper meshes.
        window.attachbody = function(obj)
        {
            var named = (obj.stateset && obj.stateset._name) || obj._name || (obj._parents && obj._parents[0]._name);
            if (obj._faked == true || !named) {
                return;
            }
            obj._faked = true; // mark so repeat draws are ignored
            if (obj._name == "composer layer" || obj._name == "Ground - Geometry") return;
            window.allmodel.push(obj)
            console.log(obj);
        }


        // Debug helper: dump whatever an experimental hook passes through.
        window.hook_test = function(payload, index)
        {
            console.log("hooked index: " + index);
            console.log(payload);
        }
        // Hook for getResourceImage (patch_3): given the image `e` the viewer
        // chose, substitute the best variant from `imagemodel` and register its
        // URL so drawhookimg can capture it later.
        window.drawhookcanvas = function(e, imagemodel)
        {

            // Skip small square images — presumably UI/placeholder textures,
            // not model textures (NOTE(review): confirm these sizes).
            if((e.width == 128 && e.height == 128) || (e.width == 32 && e.height == 32) || (e.width == 64 && e.height == 64))
            {
                return e;
            }
            if(imagemodel)
            {
                var alpha = e.options.format;
                var filename_image =  imagemodel.attributes.name;
                var uid = imagemodel.attributes.uid;
                var url_image = e.url;
                var max_size = 0;
                var obr = e;
                // Pick the largest variant; when the chosen format is "A"
                // (alpha), only variants with the same format qualify.
                imagemodel.attributes.images.forEach(function(img)
                {
                    var alpha_is_check = alpha == "A" ? img.options.format == alpha : true;

                    // Repeated halving leaves d == 1 only for power-of-two widths.
                    var d = img.width;
                    while ( d % 2 == 0 )
                    {
                        d = d / 2;
                    }

                    if(img.size > max_size && alpha_is_check && d == 1)
                    {
                        max_size = img.size;
                        url_image = img.url;
                        uid = img.uid;
                        obr = img;
                    }
                });
                // First sighting: queue this URL for capture by drawhookimg.
                if(!saveimagecache2[url_image])
                {
                    console.log(e);
                    saveimage_to_list(url_image, filename_image);
                }
                else
                {
                    //console.log(e);
                }

                return obr;
            }
            return e;
        }

        // Hook fired right after the viewer draws a texture quad (patch_2).
        // `gl` is the GL context; `t` is the captured image_data argument —
        // index 5 appears to hold the source image element (currentSrc/width/
        // height are read from it; NOTE(review): confirm against the bundle).
        // Reads the framebuffer, flips it vertically, and stores a PNG blob.
        window.drawhookimg = function(gl,t)
        {
            console.log(JSON.stringify(t));
            var url = t[5].currentSrc;
            var width = t[5].width;
            var height = t[5].height;

            // Only capture textures previously registered by drawhookcanvas.
            if(!saveimagecache2[url])
            {
                //console.log("rejected:"+url);
                return;
            }
            else
            {
                //console.log("saved texture:"+url);
            }


            var data = new Uint8Array(width * height * 4);
            gl.readPixels(0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, data);

            // GL framebuffers are bottom-left origin, so flip rows vertically.
            var halfHeight = height / 2 | 0;  // the | 0 keeps the result an int
            var bytesPerRow = width * 4;

            // make a temp buffer to hold one row
            var temp = new Uint8Array(width * 4);
            for (var y = 0; y < halfHeight; ++y)
            {
            var topOffset = y * bytesPerRow;
            var bottomOffset = (height - y - 1) * bytesPerRow;

            // make copy of a row on the top half
            temp.set(data.subarray(topOffset, topOffset + bytesPerRow));

            // copy a row from the bottom half to the top
            data.copyWithin(topOffset, bottomOffset, bottomOffset + bytesPerRow);

            // copy the copy of the top half row to the bottom half
            data.set(temp, bottomOffset);
            }

             // Create a 2D canvas to store the result
            var canvas = document.createElement('canvas');
            canvas.width = width;
            canvas.height = height;
            var context = canvas.getContext('2d');

            // Copy the pixels to a 2D canvas
            var imageData = context.createImageData(width, height);
            imageData.data.set(data);
            context.putImageData(imageData, 0, 0);

            // Normalize the cached file name to a .png extension.
            var re = /(?:\.([^.]+))?$/;
            var ext = re.exec(saveimagecache2[url].name)[1];
            var name = saveimagecache2[url].name+".png";

            if(ext == "png" || ext == "jpg" || ext == "jpeg")
            {
                var ret = saveimagecache2[url].name.replace('.'+ext,'');
                name = ret+".png";
            }
            console.log("saved texture to blob "+name);
            // Async: the blob lands in `objects` once encoding finishes.
            canvas.toBlob(function(blob){objects[name] = blob;},"image/png");
        }

})();

(() => {
            "use strict";
            // Shim re-creating the removed "beforescriptexecute" event via a
            // MutationObserver, so the patcher below can intercept <script>
            // tags before the parser executes them (requires run-at
            // document-start so the observer is installed early enough).
            const Event = class {
                constructor(script, target) {
                    this.script = script;
                    this.target = target;

                    this._cancel = false;
                    this._replace = null;
                    this._stop = false;
                }

                preventDefault() {
                    this._cancel = true;
                }
                stopPropagation() {
                    this._stop = true;
                }
                replacePayload(payload) {
                    this._replace = payload;
                }
            };

            let callbacks = [];
            window.addBeforeScriptExecuteListener = (f) => {
                if (typeof f !== "function") {
                    throw new Error("Event handler must be a function.");
                }
                callbacks.push(f);
            };
            window.removeBeforeScriptExecuteListener = (f) => {
                let i = callbacks.length;
                while (i--) {
                    if (callbacks[i] === f) {
                        callbacks.splice(i, 1);
                    }
                }
            };

            // Fire the synthetic event for one newly inserted node.
            const dispatch = (script, target) => {
                if (script.tagName !== "SCRIPT") {
                    return;
                }

                const e = new Event(script, target);

                // Legacy-style single handler first, then registered listeners.
                if (typeof window.onbeforescriptexecute === "function") {
                    try {
                        window.onbeforescriptexecute(e);
                    } catch (err) {
                        console.error(err);
                    }
                }

                for (const func of callbacks) {
                    if (e._stop) {
                        break;
                    }
                    try {
                        func(e);
                    } catch (err) {
                        console.error(err);
                    }
                }

                // Cancel: empty and remove the tag before it runs.
                // Replace: swap in patched source text.
                if (e._cancel) {
                    script.textContent = "";
                    script.remove();
                } else if (typeof e._replace === "string") {
                    script.textContent = e._replace;
                }
            };
            const observer = new MutationObserver((mutations) => {
                for (const m of mutations) {
                    for (const n of m.addedNodes) {
                        dispatch(n, m.target);
                    }
                }
            });
            observer.observe(document, {
                childList: true,
                subtree: true,
            });
        })();

(() => {
            "use strict";

            // Patcher: intercepts each Sketchfab viewer bundle before it runs,
            // rewrites the minified source at the regex anchors declared at the
            // top of the file, and re-injects the patched script.
            window.onbeforescriptexecute = (e) => {
                var links_as_arr = Array.from(e.target.childNodes);

                links_as_arr.forEach(function(srimgc)
                {
                    if(srimgc instanceof HTMLScriptElement)
                    {
                        // Only viewer bundles are patched.
                        if (srimgc.src.indexOf("web/dist/") >= 0 || srimgc.src.indexOf("standaloneViewer") >= 0)
                        {
                            e.preventDefault();
                            e.stopPropagation();
                            // Synchronous XHR on purpose: the original tag was
                            // just cancelled, so the replacement source must be
                            // ready before the parser moves on.
                            var req = new XMLHttpRequest();
                            req.open('GET', srimgc.src, false);
                            req.send('');
                            var jstext = req.responseText;
                            // NOTE(review): every anchor is a /g regex, so .exec
                            // keeps lastIndex between calls — matching may
                            // silently fail if more than one bundle is
                            // processed in a session; confirm.
                            var ret = func_renderInto1.exec(jstext);

                            // patch_0: thread an extra argument into renderInto's call site.
                            if (ret)
                            {
                                var index = ret.index + ret[0].length;
                                var head = jstext.slice(0, index);
                                var tail = jstext.slice(index);
                                jstext = head + ",i" + tail;
                                console.log("[UserScript] Injection: patch_0 injected successful " + srimgc.src);
                            }

                            ret = func_renderInto2.exec(jstext);

                            // patch_1: add the matching image_data parameter to renderInto's definition.
                            if (ret)
                            {
                                var index = ret.index + ret[0].length;
                                var head = jstext.slice(0, index);
                                var tail = jstext.slice(index);
                                jstext = head + ",image_data" + tail;
                                console.log("[UserScript] Injection: patch_1 injected successful " + srimgc.src);
                                if (!func_renderInto1.exec(jstext))
                                     console.log("[UserScript] But patch_0 failed " + srimgc.src);
                            }

                            ret = fund_drawArrays.exec(jstext);

                            // patch_2: capture the texture right after the quad is drawn.
                            if (ret)
                            {
                                var index = ret.index + ret[0].length;
                                var head = jstext.slice(0, index);
                                var tail = jstext.slice(index);
                                jstext = head + ",window.drawhookimg(t,image_data)" + tail;
                                console.log("[UserScript] Injection: patch_2 injected successful " + srimgc.src);
                            }

                            ret = func_getResourceImage.exec(jstext);

                            // patch_3: route resource images through drawhookcanvas.
                            if (ret)
                            {
                                var index = ret.index + ret[0].length;
                                var head = jstext.slice(0, index);
                                var tail = jstext.slice(index);
                                jstext = head + "e = window.drawhookcanvas(e,this._imageModel);" + tail;
                                console.log("[UserScript] Injection: patch_3 injected successful " + srimgc.src);
                            }

                            ret = func_drawGeometry.exec(jstext);

                            // patch_4: record each drawn geometry, then schedule the download button.
                            if (ret)
                            {
                                var index1 = ret.index + ret[1].length;
                                var head1 = jstext.slice(0, index1);
                                var tail1 = jstext.slice(index1);
                                jstext = head1 + ";window.attachbody(t);" + tail1;
                                console.log("[UserScript] Injection: patch_4 injected successful " + srimgc.src);
                                setTimeout(addbtnfunc, 3000);
                            }
                            //ret = func_test.exec(jstext)
                            var idx = 0;
                            // while (ret = func_test.exec(jstext))
                            // {
                            //     var index = ret.index + ret[0].length;
                            //     var head = jstext.slice(0, index);
                            //     var tail = jstext.slice(index);
                            //     jstext = head +"window.attachbody(e);"+ tail;
                            //     //jstext = head + "window.drawhook(e);" + tail;
                            //     func_test.lastIndex = index + 1000;
                            //     console.log("[UserScript] Injection: patch_4 injected successful" + srimgc.src);
                            //     setTimeout(addbtnfunc, 3000);
                            // }

                            // Re-inject the patched bundle as an inline script.
                            var obj = document.createElement('script');
                            obj.type = "text/javascript";
                            obj.text = jstext;
                            document.getElementsByTagName('head')[0].appendChild(obj);
                        }
                    }
                });
            };
        })();

ArtStation

#!/usr/bin/env python3
"""
ArtStation Marmoset Viewer Grabber
===================================
Downloads 3D models (.mview) from ArtStation artwork pages,
extracts textures and mesh data, converts meshes to .obj format.

Uses Playwright to bypass Cloudflare protection on ArtStation.

Usage:
    python grabber.py <artstation_url_or_mview_file> [--output <dir>]

Examples:
    python grabber.py https://www.artstation.com/artwork/0mr3G
    python grabber.py https://www.artstation.com/artwork/0mr3G --output my_model
    python grabber.py downloaded_file.mview
"""

import argparse
import io
import json
import os
import re
import struct
import sys
from struct import unpack
from urllib.parse import urlparse

import requests
from playwright.sync_api import sync_playwright


# ═══════════════════════════════════════════════
#  Constants
# ═══════════════════════════════════════════════

ARTSTATION_ARTWORK_RE = re.compile(r"artstation\.com/artwork/([A-Za-z0-9]+)")

BROWSER_UA = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
    "AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/120.0.0.0 Safari/537.36"
)


# ═══════════════════════════════════════════════
#  ArtStation: find .mview URL via browser
# ═══════════════════════════════════════════════

def extract_artwork_id(url: str) -> str:
    """Return the hash_id portion of an ArtStation artwork URL.

    Raises:
        ValueError: when ``url`` does not look like an artwork page URL.
    """
    match = ARTSTATION_ARTWORK_RE.search(url)
    if match is None:
        raise ValueError(f"Cannot extract artwork ID from URL: {url}")
    return match.group(1)


def _create_browser_context(playwright):
    """Launch a headless Chromium context tuned to evade bot detection.

    Returns:
        A ``(browser, context)`` pair; the caller must close the browser.
    """
    launch_args = ["--disable-blink-features=AutomationControlled"]
    # Hide the webdriver flag that anti-bot checks look for.
    stealth_js = (
        'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'
    )
    browser = playwright.chromium.launch(headless=True, args=launch_args)
    context = browser.new_context(
        user_agent=BROWSER_UA,
        viewport={"width": 1920, "height": 1080},
    )
    context.add_init_script(stealth_js)
    return browser, context


def find_mview_info(artwork_url: str) -> dict:
    """
    Use Playwright to load the ArtStation artwork page,
    intercept the API JSON, find the marmoset embed,
    and extract the .mview download URL.

    Returns dict with keys: title, author, mview_url, embed_id, thumbnail_url
    """
    artwork_id = extract_artwork_id(artwork_url)
    # Defaults are returned unchanged when no marmoset viewer is found.
    result = {
        "title": artwork_id,
        "author": "unknown",
        "mview_url": None,
        "embed_id": None,
        "thumbnail_url": None,
    }

    print(f"[*] Opening ArtStation page in browser...")

    with sync_playwright() as p:
        browser, context = _create_browser_context(p)
        page = context.new_page()

        # One-element list used as a mutable closure cell for the handler below.
        api_data = [None]

        def on_response(response):
            # The SPA fetches /projects/<id>.json itself; capture that response
            # instead of scraping the rendered DOM.
            if f"/projects/{artwork_id}.json" in response.url and response.status == 200:
                try:
                    api_data[0] = response.json()
                except Exception:
                    pass

        page.on("response", on_response)
        page.goto(artwork_url, wait_until="load", timeout=30000)
        page.wait_for_timeout(10000)  # wait for SPA to fetch API data

        # ── Parse API data ──
        if api_data[0]:
            data = api_data[0]
            result["title"] = data.get("title", artwork_id)
            result["author"] = data.get("user", {}).get("full_name", "unknown")
            print(f'[*] Artwork: "{result["title"]}" by {result["author"]}')

            # Find marmoset asset
            for asset in data.get("assets", []):
                if asset.get("asset_type") == "marmoset":
                    result["embed_id"] = asset.get("id")
                    result["thumbnail_url"] = asset.get("image_url")
                    player = asset.get("player_embedded") or ""
                    # Extract embed URL from iframe
                    embed_match = re.search(
                        r'src="(https://www\.artstation\.com/embed/\d+)"', player
                    )
                    if embed_match:
                        embed_url = embed_match.group(1)
                        print(f"[*] Found marmoset embed: {embed_url}")
                    break
            else:
                # for/else: runs only when no marmoset asset broke the loop.
                print("[!] No marmoset 3D viewer found in this artwork.")
                browser.close()
                return result

        if not result["embed_id"]:
            # Fallback: search page HTML for embed iframes
            content = page.content()
            embed_match = re.search(
                r'src="(https://www\.artstation\.com/embed/(\d+))"', content
            )
            if embed_match:
                result["embed_id"] = int(embed_match.group(2))
            else:
                print("[!] Could not find marmoset embed on this page.")
                browser.close()
                return result

        # ── Load embed page to get .mview URL ──
        embed_url = f"https://www.artstation.com/embed/{result['embed_id']}"
        print(f"[*] Loading embed page: {embed_url}")

        embed_page = context.new_page()
        embed_page.goto(embed_url, wait_until="load", timeout=30000)
        embed_page.wait_for_timeout(3000)

        embed_content = embed_page.content()
        mview_matches = re.findall(
            r"(https?://[^\s\"'<>]+\.mview[^\s\"'<>]*)", embed_content
        )

        if mview_matches:
            # Take the one from marmoset.embed() call - clean off query params for download
            raw_url = mview_matches[0]
            # Keep the URL with query params (sometimes needed for auth)
            result["mview_url"] = raw_url
            print(f"[+] Found .mview URL: {raw_url}")
        else:
            print("[!] Could not find .mview URL in embed page.")

        browser.close()

    return result


# ═══════════════════════════════════════════════
#  Download .mview file
# ═══════════════════════════════════════════════

def download_mview(url: str, output_path: str) -> str:
    """Stream the .mview archive to disk, printing a simple progress line.

    Returns:
        The ``output_path`` that was written, for convenient chaining.
    """
    print(f"[*] Downloading: {url}")
    resp = requests.get(
        url,
        headers={"User-Agent": BROWSER_UA},
        stream=True,
        timeout=120,
    )
    resp.raise_for_status()

    # Content-Length may be absent (0); progress is skipped in that case.
    total = int(resp.headers.get("content-length", 0))
    downloaded = 0

    with open(output_path, "wb") as f:
        for chunk in resp.iter_content(chunk_size=8192):
            f.write(chunk)
            downloaded += len(chunk)
            if total <= 0:
                continue
            size_mb = downloaded / (1024 * 1024)
            total_mb = total / (1024 * 1024)
            pct = downloaded / total * 100
            print(
                f"\r[*] Progress: {size_mb:.1f}/{total_mb:.1f} MB ({pct:.0f}%)",
                end="",
                flush=True,
            )

    print()
    print(f"[+] Downloaded: {output_path} ({downloaded / (1024*1024):.1f} MB)")
    return output_path


# ═══════════════════════════════════════════════
#  MVIEW archive extraction (LZW decompression)
# ═══════════════════════════════════════════════

def _read_cstr(f) -> str:
    """Read a null-terminated C string from a binary stream."""
    buf = []
    while True:
        b = struct.unpack("<b", f.read(1))[0]
        if b == 0:
            return "".join(map(chr, buf))
        buf.append(b)


def _read_uint32(f) -> int:
    return struct.unpack("<I", f.read(4))[0]


def _decompress(a: bytes, b: int) -> bytearray | None:
    """
    LZW-variant decompression used by Marmoset Viewer format.
    Based on reverse-engineering by @majidemo (majimboo/mviewer).

    `a` is the compressed payload; `b` is the expected decompressed size.
    Returns the decompressed bytes, or None when the output length does
    not come out to exactly `b` (caller treats that as corruption).
    """
    c = bytearray(b)  # output buffer, pre-sized to the expected length
    d = 0             # write cursor into c
    e = [0] * 4096    # dictionary: start offset of each code's phrase in c
    f = [0] * 4096    # dictionary: length of each code's phrase
    g = 256           # next free dictionary code (0-255 are literal bytes)
    h = len(a)
    k = 0             # start offset of the previously emitted phrase
    l = 1  # noqa: E741  (length of the previously emitted phrase)
    m = 0
    n = 1

    # The first byte is always emitted as a literal.
    c[d] = a[0]
    d += 1

    r = 1
    while True:
        # Codes are 12 bits, packed two per 3 bytes; r's parity picks the half.
        n = r + (r >> 1)
        if (n + 1) >= h:
            break
        m = a[n + 1]
        n = a[n]
        p = (m << 4 | n >> 4) if r & 1 else ((m & 15) << 8 | n)
        if p < g:
            if 256 > p:
                # Literal byte.
                m = d
                n = 1
                c[d] = p
                d += 1
            else:
                # Known dictionary phrase: copy it into the output.
                m = d
                n = f[p]
                p = e[p]
                q = p + n
                while p < q:
                    c[d] = c[p]
                    d += 1
                    p += 1
        elif p == g:
            # Classic LZW "code not yet defined" case:
            # emit the previous phrase plus its own first byte.
            m = d
            n = l + 1
            p = k
            q = k + l
            while p < q:
                c[d] = c[p]
                d += 1
                p += 1
            c[d] = c[k]
            d += 1
        else:
            # Code beyond the dictionary: corrupt stream, bail out.
            break

        # Register the new phrase; the dictionary wraps at 4096 codes.
        e[g] = k
        f[g] = l + 1
        g += 1
        k = m
        l = n  # noqa: E741
        g = 256 if 4096 <= g else g
        r += 1

    # Valid only if exactly the expected number of bytes was produced.
    return c if d == b else None


def extract_mview(mview_path: str, output_dir: str) -> list[dict]:
    """
    Extract all files from an .mview archive into *output_dir*.
    Returns list of dicts: {name, type, size, path}.
    """
    os.makedirs(output_dir, exist_ok=True)
    results: list[dict] = []

    with open(mview_path, "rb") as f:
        # Determine the archive length, then walk entries front to back.
        f.seek(0, 2)
        archive_end = f.tell()
        f.seek(0)

        while f.tell() < archive_end:
            # Entry header: name, MIME type, flags, stored size, raw size.
            name = _read_cstr(f)
            ftype = _read_cstr(f)
            flags = _read_uint32(f)
            stored_size = _read_uint32(f)
            raw_size = _read_uint32(f)

            payload = f.read(stored_size)

            # Bit 0 marks an LZW-compressed payload.
            if flags & 1:
                payload = _decompress(payload, raw_size)
                if payload is None:
                    print(f"[!] WARNING: decompression failed for {name}")
                    continue

            dest = os.path.join(output_dir, name)
            with open(dest, "wb") as outf:
                outf.write(payload)

            results.append({
                "name": name,
                "type": ftype,
                "size": len(payload),
                "path": dest,
            })

            # Categorize for nice output
            if ftype.startswith("image/"):
                category = "texture"
            elif ftype.startswith("model/"):
                category = "mesh"
            elif "json" in ftype:
                category = "metadata"
            else:
                category = "other"

            size_kb = len(payload) / 1024
            print(f"    [{category:>8}] {name} ({size_kb:.1f} KB)")

    return results


# ═══════════════════════════════════════════════
#  Mesh .dat → .obj + .mtl conversion
# ═══════════════════════════════════════════════

def convert_meshes_to_obj(output_dir: str) -> list[str]:
    """
    Read scene.json from an extracted mview and convert mesh .dat files to .obj.

    Writes one shared master.mtl built from scene["materials"], then one
    .obj file per entry in scene["meshes"].  Each .dat payload is expected
    to contain, in order: triangle indices per submesh, wireframe indices
    (skipped), then interleaved vertex records.

    Args:
        output_dir: Directory containing scene.json and the mesh .dat files.

    Returns:
        List of created .obj file paths (empty if scene.json is absent).
    """
    scene_path = os.path.join(output_dir, "scene.json")
    if not os.path.exists(scene_path):
        print("[!] No scene.json found — skipping mesh conversion")
        return []

    with open(scene_path, "r") as f:
        scene = json.load(f)

    obj_files = []

    # ── Create .mtl (materials) ──
    mtl_path = os.path.join(output_dir, "master.mtl")
    with open(mtl_path, "w") as mtl:
        for mat in scene.get("materials", []):
            name = mat.get("name", "unnamed")
            albedo = mat.get("albedoTex", "")
            normal = mat.get("normalTex", "")
            reflect = mat.get("reflectivityTex", "")

            mtl.write(f"newmtl {name}\n")
            if albedo:
                mtl.write(f"map_Ka {albedo}\n")
                mtl.write(f"map_Kd {albedo}\n")
            if normal:
                # Write both keys: different loaders honor different ones.
                mtl.write(f"bump {normal}\n")
                mtl.write(f"map_bump {normal}\n")
            if reflect:
                mtl.write(f"map_Ks {reflect}\n")
            mtl.write("\n")

    print(f"[+] Materials: {mtl_path}")

    # ── Convert each mesh ──
    for mesh in scene.get("meshes", []):
        name = mesh.get("name", "unnamed")
        dat_file = mesh.get("file", "")
        if not dat_file:
            continue

        dat_path = os.path.join(output_dir, dat_file)
        if not os.path.exists(dat_path):
            print(f"[!] Mesh file not found: {dat_file}")
            continue

        wire_count = mesh.get("wireCount", 0)
        vertex_count = mesh.get("vertexCount", 0)
        index_type_size = mesh.get("indexTypeSize", 2)

        tex_coord_2 = mesh.get("secondaryTexCoord", 0)
        vertex_color = mesh.get("vertexColor", 0)

        # Vertex stride: 32 bytes base, plus optional per-vertex color (4)
        # and a second UV set (8).
        stride = 32
        if vertex_color > 0:
            stride += 4
        if tex_coord_2 > 0:
            stride += 8

        obj_path = os.path.join(output_dir, f"{dat_file}.obj")
        obj_files.append(obj_path)

        with open(dat_path, "rb") as df, open(obj_path, "w") as out:
            out.write("mtllib master.mtl\n")

            face_list = []
            vert_list = []
            uv_list = []
            materials_list = []

            # Read faces for each submesh
            for sub_mesh in mesh.get("subMeshes", []):
                faces = []
                material = sub_mesh.get("material", "")
                sub_index_count = sub_mesh.get("indexCount", 0)

                # 3 indices per triangle: bytes-per-face is 12 (u32) or 6 (u16).
                if index_type_size == 4:
                    face_count = (sub_index_count * index_type_size) // 12
                else:
                    face_count = (sub_index_count * index_type_size) // 6

                for _ in range(face_count):
                    if index_type_size == 2:
                        faces.append(unpack("<HHH", df.read(6)))
                    else:
                        faces.append(unpack("<III", df.read(12)))

                face_list.append(faces)
                materials_list.append(material)

            # Skip wire indices
            df.seek(wire_count * index_type_size, 1)

            # Read vertices: position (12 bytes) + primary UV (8 bytes),
            # then skip the rest of the interleaved record.
            for _ in range(vertex_count):
                pos = unpack("<fff", df.read(12))
                texpos = unpack("<ff", df.read(8))
                df.read(stride - 20)  # skip remaining stride data

                vert_list.append(pos)
                uv_list.append(texpos)

            # Write vertices
            for v in vert_list:
                out.write(f"v {v[0]} {v[1]} {v[2]}\n")

            # Write UVs
            for uv in uv_list:
                out.write(f"vt {uv[0]} {uv[1]}\n")

            # Write faces grouped by submesh/material.
            # OBJ indices are 1-based.  No vertex normals are emitted, so
            # faces use the v/vt form — the previous v/vt/vn form referenced
            # nonexistent normal records and produced invalid OBJ.
            for i, faces in enumerate(face_list):
                out.write(f"\ng {name}\n")
                out.write(f"usemtl {materials_list[i]}\n")
                for face in faces:
                    a, b, c = face[0] + 1, face[1] + 1, face[2] + 1
                    out.write(f"f {a}/{a} {b}/{b} {c}/{c}\n")

        print(f"[+] Mesh -> OBJ: {obj_path}")

    return obj_files


# ═══════════════════════════════════════════════
#  Summary
# ═══════════════════════════════════════════════

def print_summary(output_dir: str, extracted: list[dict], obj_files: list[str]):
    """Print a formatted report of extracted files grouped by category."""
    textures = []
    meshes = []
    metadata = []
    for entry in extracted:
        mime = entry["type"]
        if mime.startswith("image/"):
            textures.append(entry)
        if mime.startswith("model/"):
            meshes.append(entry)
        if "json" in mime:
            metadata.append(entry)

    bar = "=" * 55
    print("\n" + bar)
    print("  EXTRACTION SUMMARY")
    print(bar)
    print(f"  Output directory : {os.path.abspath(output_dir)}")
    print(f"  Textures         : {len(textures)} files")
    for tex in textures:
        print(f"    - {tex['name']} ({tex['size'] / 1024:.1f} KB)")
    print(f"  Mesh data (.dat) : {len(meshes)} files")
    print(f"  OBJ models       : {len(obj_files)} files")
    for obj_path in obj_files:
        print(f"    - {os.path.basename(obj_path)}")
    print(f"  Metadata         : {len(metadata)} files")
    print(bar)


# ═══════════════════════════════════════════════
#  Main
# ═══════════════════════════════════════════════

def sanitize_dirname(name: str) -> str:
    """Make a safe directory name from artwork title."""
    # Replace filesystem-hostile characters, then drop edge dots/spaces.
    cleaned = re.sub(r'[<>:"/\\|?*]', "_", name).strip(". ")
    if not cleaned:
        return "artstation_model"
    return cleaned[:100]


def main():
    """CLI entry point: resolve the source, extract the archive, convert meshes."""
    parser = argparse.ArgumentParser(
        description="Download and extract 3D models from ArtStation (Marmoset Viewer)"
    )
    parser.add_argument(
        "source",
        help="ArtStation artwork URL or path to local .mview file",
    )
    parser.add_argument(
        "--output",
        "-o",
        help="Output directory (default: auto-generated from artwork title)",
        default=None,
    )
    parser.add_argument(
        "--skip-obj",
        help="Skip .dat -> .obj conversion",
        action="store_true",
    )
    args = parser.parse_args()
    source = args.source

    if source.endswith(".mview") and os.path.isfile(source):
        # Local archive: extract into --output or next to the file.
        mview_path = source
        extracted_dir = args.output or os.path.splitext(source)[0]
    elif "artstation.com" in source:
        info = find_mview_info(source)
        if not info["mview_url"]:
            print("[!] ERROR: No .mview file found for this artwork.")
            print("    This artwork may not contain a Marmoset 3D viewer.")
            sys.exit(1)

        # Output directory: explicit flag, or derived from title + artwork id.
        if args.output:
            output_dir = args.output
        else:
            artwork_id = extract_artwork_id(source)
            output_dir = sanitize_dirname(f'{info["title"]}_{artwork_id}')
        os.makedirs(output_dir, exist_ok=True)

        # Filename from the URL path, falling back to the artwork id.
        mview_filename = info["mview_url"].split("/")[-1].split("?")[0]
        if not mview_filename.endswith(".mview"):
            mview_filename = f"{extract_artwork_id(source)}.mview"

        mview_path = os.path.join(output_dir, mview_filename)
        download_mview(info["mview_url"], mview_path)

        extracted_dir = os.path.join(
            output_dir, os.path.splitext(mview_filename)[0]
        )
    else:
        print(
            f"[!] ERROR: '{source}' is neither a valid ArtStation URL "
            "nor a .mview file."
        )
        sys.exit(1)

    # ── Extract the .mview archive ──
    print("\n[*] Extracting .mview archive...")
    extracted = extract_mview(mview_path, extracted_dir)
    print(f"[+] Extracted {len(extracted)} files -> {extracted_dir}")

    # ── Convert meshes to .obj ──
    obj_files = []
    if not args.skip_obj:
        print("\n[*] Converting meshes to OBJ format...")
        obj_files = convert_meshes_to_obj(extracted_dir)
        if not obj_files:
            print("[!] No meshes to convert (or no scene.json)")

    # ── Summary ──
    print_summary(extracted_dir, extracted, obj_files)
    print(f"\n[+] Done! Files saved to: {os.path.abspath(extracted_dir)}")
    return 0


if __name__ == "__main__":
    sys.exit(main() or 0)

Tripo3D

#!/usr/bin/env python3
"""
Tripo3D Studio Grabber
======================
Downloads 3D models (GLB, OBJ, FBX, etc.) from Tripo3D Studio workspace URLs.
Uses Playwright to load the page and intercept model URLs or binary model data.

Usage:
    python tripoGrabber.py <tripo_workspace_url> [--output <dir>] [--format fbx|glb|obj|auto]

Examples:
    python tripoGrabber.py "https://studio.tripo3d.ai/workspace/retopology/..."
    python tripoGrabber.py "https://studio.tripo3d.ai/workspace/..." --format fbx -o ./model.fbx
"""

import argparse
import json
import os
import re
import sys
from pathlib import Path
from urllib.parse import urlparse, unquote

import requests
from playwright.sync_api import sync_playwright


# ═══════════════════════════════════════════════
#  Constants
# ═══════════════════════════════════════════════

# Matches workspace URLs: studio.tripo3d.ai/workspace/<section>/<slug>
TRIPO_WORKSPACE_RE = re.compile(
    r"studio\.tripo3d\.ai/workspace/[^/]+/([a-z0-9-]+)"
)

# File extensions treated as downloadable 3D-model assets.
MODEL_EXTENSIONS = (".glb", ".gltf", ".obj", ".fbx", ".stl")
# Same extensions as a regex, anchored at end-of-string or before a query.
MODEL_EXT_PATTERN = re.compile(
    r"\.(glb|gltf|obj|fbx|stl)(?:\?|$)", re.IGNORECASE
)

# Format priority when choosing among several URLs (first = most desired)
FORMAT_PRIORITY = {"fbx": [".fbx"], "glb": [".glb", ".gltf"], "obj": [".obj"], "stl": [".stl"]}

# Desktop Chrome user-agent string, used for both the Playwright context
# and direct requests downloads.
BROWSER_UA = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
    "AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/120.0.0.0 Safari/537.36"
)


# ═══════════════════════════════════════════════
#  URL / slug parsing
# ═══════════════════════════════════════════════

def extract_slug_from_url(url: str) -> str:
    """Return the last path segment (slug with UUID) of a Tripo workspace URL.

    Raises:
        ValueError: if the host is not tripo3d.ai, or the path has no slug
            (e.g. a bare .../workspace/overview page).
    """
    parsed = urlparse(url)
    path = (parsed.path or "").strip("/")
    if "tripo3d.ai" not in (parsed.netloc or ""):
        raise ValueError(f"Not a Tripo3D URL: {url}")
    segments = [seg for seg in path.split("/") if seg]
    # Expect .../workspace/<section>/<slug> — at least three segments.
    if len(segments) < 3:
        raise ValueError(f"Cannot extract slug from URL: {url}")
    return segments[-1]


def sanitize_dirname(name: str) -> str:
    """Safe directory name from slug (max 120 chars)."""
    # Strip filesystem-hostile characters and edge dots/spaces.
    cleaned = re.sub(r'[<>:"/\\|?*]', "_", name).strip(". ")
    if not cleaned:
        return "tripo_model"
    return cleaned[:120]


# ═══════════════════════════════════════════════
#  Find model URL via Playwright
# ═══════════════════════════════════════════════

def _create_browser_context(playwright):
    """Launch headless Chromium with basic automation-detection countermeasures."""
    launch_args = ["--disable-blink-features=AutomationControlled"]
    browser = playwright.chromium.launch(headless=True, args=launch_args)
    context = browser.new_context(
        user_agent=BROWSER_UA,
        viewport={"width": 1920, "height": 1080},
    )
    # Hide navigator.webdriver so naive bot checks on the page pass.
    context.add_init_script(
        'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'
    )
    return browser, context


def _collect_urls_from_json(obj, out: set):
    """Recursively collect strings that look like model URLs from JSON."""
    if isinstance(obj, dict):
        for value in obj.values():
            _collect_urls_from_json(value, out)
        return
    if isinstance(obj, list):
        for item in obj:
            _collect_urls_from_json(item, out)
        return
    if not isinstance(obj, str) or not obj.startswith("http"):
        return
    lowered = obj.lower()
    looks_like_model = (
        MODEL_EXT_PATTERN.search(obj) is not None
        or "/model" in lowered
        or "glb" in lowered
        or "fbx" in lowered
        or "download" in lowered
    )
    if looks_like_model:
        # Store without the query string.
        out.add(obj.split("?")[0] if "?" in obj else obj)


def _pick_url_by_format(urls: list[str], preferred: str) -> str | None:
    """Pick the URL matching the preferred format; with "auto", take the first."""
    if not urls:
        return None
    if not preferred or preferred == "auto":
        return urls[0]
    suffixes = FORMAT_PRIORITY.get(preferred.lower())
    if suffixes is None:
        # Unknown format keyword — fall back to the first URL.
        return urls[0]
    for candidate in urls:
        lowered = candidate.lower()
        for ext in suffixes:
            if lowered.rstrip("/").endswith(ext) or ext in lowered:
                return candidate
    return urls[0]


def _try_trigger_fbx_export(page, model_urls: list) -> None:
    """Try to click Export/Download -> FBX in the Tripo UI so an .fbx request appears.

    No-op when an .fbx URL was already intercepted.  Best-effort: missing
    selectors and failed clicks are silently skipped; the caller inspects
    the network log afterwards to see whether an FBX URL showed up.
    """
    if any(".fbx" in u.lower() for u in model_urls):
        return
    # Look for buttons: Manage, Download, Export — and then an FBX menu item
    selectors_try = [
        'button:has-text("Manage")',
        '[aria-label*="Manage"]',
        'button:has-text("Download")',
        'button:has-text("Export")',
        'a:has-text("Download")',
        '[class*="manage"]',
        '[class*="download"]',
    ]
    for sel in selectors_try:
        try:
            btn = page.query_selector(sel)
            if btn and btn.is_visible():
                btn.click()
                # Give the menu time to render before searching for FBX.
                page.wait_for_timeout(2000)
                # Look for an FBX option in the opened menu
                fbx_el = page.query_selector('button:has-text("FBX"), a:has-text("FBX"), [data-format="fbx"], [class*="fbx"]')
                if fbx_el and fbx_el.is_visible():
                    fbx_el.click()
                    # Wait for the export request to fire.
                    page.wait_for_timeout(5000)
                    return
        except Exception:
            continue


def find_model_info(workspace_url: str, preferred_format: str = "auto") -> dict:
    """
    Open Tripo workspace URL in browser, intercept network:
    - Direct model file URLs (.glb, .obj, .fbx, etc.)
    - JSON API responses that contain model/download URLs.
    preferred_format: "fbx" | "glb" | "obj" | "auto" — какой формат предпочитать.
    Returns dict: model_url, model_data (bytes or None), content_type, title.
    """
    slug = extract_slug_from_url(workspace_url)
    result = {
        "title": sanitize_dirname(slug),
        "model_url": None,
        "model_data": None,
        "content_type": None,
        "filename": None,
    }

    print(f"[*] Opening Tripo Studio page...")
    print(f"[*] Slug: {slug}")
    if preferred_format and preferred_format != "auto":
        print(f"[*] Preferred format: {preferred_format.upper()}")

    model_urls = []
    api_jsons = []

    with sync_playwright() as p:
        browser, context = _create_browser_context(p)
        page = context.new_page()

        def on_response(response):
            url = response.url
            try:
                if response.status != 200:
                    return
                content_type = (response.headers.get("content-type") or "").lower()
                if any(ext in url.lower() for ext in MODEL_EXTENSIONS):
                    model_urls.append(url)
                    ext_hint = next((e for e in MODEL_EXTENSIONS if e in url.lower()), "")
                    print(f"[+] Model URL (direct{ext_hint and ' ' + ext_hint}'): {url[:90]}...")
                    return
                if "model/gltf" in content_type or "model/gltf-binary" in content_type:
                    model_urls.append(url)
                    print(f"[+] Model URL (content-type): {url[:90]}...")
                    return
                if "json" in content_type and ("api" in url or "tripo" in url or "workspace" in url or "project" in url):
                    try:
                        body = response.body()
                        data = json.loads(body.decode("utf-8", errors="ignore"))
                        api_jsons.append(data)
                    except Exception:
                        pass
            except Exception:
                pass

        page.on("response", on_response)
        page.goto(workspace_url, wait_until="load", timeout=60000)
        page.wait_for_timeout(15000)

        for data in api_jsons:
            urls_in_json = set()
            _collect_urls_from_json(data, urls_in_json)
            for u in urls_in_json:
                if MODEL_EXT_PATTERN.search(u) or "glb" in u.lower() or "fbx" in u.lower() or "model" in u.lower():
                    model_urls.append(u)
                    print(f"[+] Model URL (from API): {u[:90]}...")

        # Если нужен FBX, а в списке только GLB — пробуем нажать Export -> FBX
        if preferred_format and preferred_format.lower() == "fbx":
            _try_trigger_fbx_export(page, model_urls)
            # после клика могли добавиться новые URL
            page.wait_for_timeout(3000)

        seen = set()
        unique_urls = []
        for u in model_urls:
            u_clean = u.split("?")[0]
            if u_clean not in seen:
                seen.add(u_clean)
                unique_urls.append(u)

        chosen = _pick_url_by_format(unique_urls, (preferred_format or "auto").strip().lower())
        if chosen:
            result["model_url"] = chosen

        browser.close()

    if not result["model_url"]:
        return result

    # Derive filename from URL
    path_part = urlparse(result["model_url"]).path
    name = unquote(Path(path_part).name) if path_part else "model"
    if not any(name.lower().endswith(ext) for ext in MODEL_EXTENSIONS):
        name = name + ".glb"
    result["filename"] = name
    return result


# ═══════════════════════════════════════════════
#  Download model
# ═══════════════════════════════════════════════

def download_model(url: str, output_path: str) -> str:
    """Stream a binary model file to disk, printing coarse progress."""
    print(f"[*] Downloading: {url[:80]}...")
    resp = requests.get(
        url, headers={"User-Agent": BROWSER_UA}, stream=True, timeout=120
    )
    resp.raise_for_status()

    total_bytes = int(resp.headers.get("content-length", 0))
    written = 0
    reported_pct = -1

    with open(output_path, "wb") as fh:
        for chunk in resp.iter_content(chunk_size=65536):
            fh.write(chunk)
            written += len(chunk)
            if total_bytes <= 0:
                continue
            pct = min(100, int(written / total_bytes * 100))
            # Report only at 10% steps (and at completion) to keep output short.
            if pct != reported_pct and (pct % 10 == 0 or pct == 100):
                size_mb = written / (1024 * 1024)
                total_mb = total_bytes / (1024 * 1024)
                print(f"\r[*] Progress: {size_mb:.1f}/{total_mb:.1f} MB ({pct}%)", end="", flush=True)
                reported_pct = pct
    print()
    print(f"[+] Saved: {output_path} ({written / (1024*1024):.2f} MB)")
    return output_path


# ═══════════════════════════════════════════════
#  Main
# ═══════════════════════════════════════════════

def main():
    """CLI entry point: locate the model URL on the workspace page and download it."""
    parser = argparse.ArgumentParser(
        description="Download 3D models from Tripo3D Studio workspace URLs"
    )
    parser.add_argument(
        "url",
        help="Tripo3D workspace URL (e.g. studio.tripo3d.ai/workspace/retopology/...)",
    )
    parser.add_argument(
        "--output", "-o",
        help="Output directory or file path",
        default=None,
    )
    parser.add_argument(
        "--format", "-f",
        choices=["auto", "fbx", "glb", "obj", "stl"],
        default="auto",
        help="Preferred format: fbx, glb, obj, stl. If fbx and only GLB is loaded, will try to click Export->FBX (default: auto = first found)",
    )
    args = parser.parse_args()

    url = args.url.strip()
    if not url.startswith("http"):
        url = "https://" + url

    try:
        slug = extract_slug_from_url(url)
    except ValueError as e:
        print(f"[!] {e}")
        sys.exit(1)

    info = find_model_info(url, preferred_format=args.format)
    if not info["model_url"]:
        print("[!] No model URL found. The page may require login, or the project has no model.")
        sys.exit(1)

    # Resolve destination: --output may be a file path, a directory, or absent.
    default_name = info["filename"] or "model.glb"
    if args.output and args.output.endswith((".glb", ".obj", ".fbx", ".stl", ".gltf")):
        output_file = args.output
    elif args.output:
        output_file = os.path.join(args.output, default_name)
    else:
        output_file = os.path.join(sanitize_dirname(slug), default_name)

    os.makedirs(os.path.dirname(output_file) or ".", exist_ok=True)
    if args.format and args.format.lower() == "fbx" and ".fbx" not in (info["model_url"] or "").lower():
        print("[!] FBX requested but only non-FBX URL found; saving as-is. Try opening the page and clicking Export -> FBX manually if you need FBX.")
    download_model(info["model_url"], output_file)

    print(f"\n[+] Done. Model: {os.path.abspath(output_file)}")
    return 0


if __name__ == "__main__":
    sys.exit(main() or 0)

Конвертировать .glb с meshopt-сжатием в обычный .glb

#!/usr/bin/env python3
"""
GLB: снять EXT_meshopt_compression для Blender
==============================================
Конвертирует .glb с meshopt-сжатием в обычный .glb, который открывается в Blender
(ошибка «Extension EXT_meshopt_compression is not available»).

Использование:
  python glb_decode_meshopt.py input.glb [output.glb]

Требуется Node.js и npx. Установка зависимостей (один раз):
  https://nodejs.org/en/download
  npm install -g @gltf-transform/cli
  # или без глобальной установки (npx скачает при первом запуске):
  npx @gltf-transform/cli copy input.glb output.glb
  Сконвертировать:
  gltf-transform copy "D:\Projects\Python\ArtStationGrabber\armored-bust-with-circular-lambda-emblem-on-chest-plate-sci-fi-armor-99bae719-7e54-4af2-94c9-533df2e20f3c\tripo_base_model_2ca75095-6be1-4062-97cd-657d8081a8d2_meshopt.glb" "D:\Projects\Python\ArtStationGrabber\armored-bust-with-circular-lambda-emblem-on-chest-plate-sci-fi-armor-99bae719-7e54-4af2-94c9-533df2e20f3c\tripo_base_model_decoded.glb"
"""

import argparse
import os
import subprocess
import sys

# On Windows, subprocess cannot find gltf-transform.cmd without shell=True
USE_SHELL = sys.platform == "win32"


def run_cmd(cmd, inp: str, out: str, timeout: int = 120) -> bool:
    """Run a conversion command; return True when it exits with code 0.

    Args:
        cmd: Either a full shell command string (inp/out already embedded;
            run with shell=True — needed on Windows to resolve
            gltf-transform.cmd), or a list of argv tokens to which
            [inp, out] is appended.
        inp: Input file path (used only for the list form).
        out: Output file path (used only for the list form).
        timeout: Kill the child after this many seconds.

    The child's stdout/stderr stream directly to the console (output is not
    captured), so the old "print r.stderr on failure" branch was dead code
    — subprocess.run leaves stderr as None unless capture is requested.
    """
    try:
        if isinstance(cmd, str):
            print(f"[*] Running: {cmd}")
            result = subprocess.run(cmd, shell=True, timeout=timeout)
        else:
            argv = cmd + [inp, out]
            print(f"[*] Running: {' '.join(argv)}")
            result = subprocess.run(argv, timeout=timeout)
        return result.returncode == 0
    except FileNotFoundError:
        # Command not installed — the caller tries the next candidate.
        return False
    except subprocess.TimeoutExpired:
        print("[!] Timeout", file=sys.stderr)
        return False
    except Exception as e:
        # Best effort: report anything unexpected and signal failure.
        print(f"[!] {e}", file=sys.stderr)
        return False


def main():
    """CLI entry point: decode a meshopt-compressed GLB via gltf-transform.

    Tries a globally installed `gltf-transform` first, then falls back to
    `npx`.  On total failure, prints manual instructions and exits with
    status 1.
    """
    parser = argparse.ArgumentParser(
        description="Convert meshopt-compressed GLB to standard GLB for Blender"
    )
    parser.add_argument("input", help="Input .glb file (with EXT_meshopt_compression)")
    parser.add_argument("output", nargs="?", default=None, help="Output .glb (default: input_decoded.glb)")
    args = parser.parse_args()

    inp = os.path.abspath(args.input)
    if not os.path.isfile(inp):
        print(f"[!] File not found: {inp}")
        sys.exit(1)
    if not inp.lower().endswith(".glb"):
        print("[!] Input should be a .glb file")
        sys.exit(1)

    if args.output:
        out = os.path.abspath(args.output)
    else:
        base, ext = os.path.splitext(inp)
        out = base + "_decoded.glb"

    # First try gltf-transform (globally installed), then npx
    if USE_SHELL:
        # Shell-string form: paths are embedded so Windows resolves the .cmd shim.
        commands = [
            f'gltf-transform copy "{inp}" "{out}"',
            f'npx -y @gltf-transform/cli copy "{inp}" "{out}"',
        ]
    else:
        # List form: run_cmd appends [inp, out] to the argv.
        commands = [
            ["gltf-transform", "copy"],
            ["npx", "-y", "@gltf-transform/cli", "copy"],
        ]

    for cmd in commands:
        if run_cmd(cmd, inp, out):
            print(f"[+] Saved: {out}")
            return 0

    # The four {} placeholders are filled by .format(inp, out, inp, out);
    # the doubled {{ }} braces are literal JS braces in the embedded snippet.
    print("""
[!] Automatic conversion failed. Do one of the following:

1) Install Node.js, then run (decodes meshopt and writes standard GLB):
   npm install -g @gltf-transform/cli
   gltf-transform copy "{}" "{}"

2) Or use npx (no global install):
   npx -y @gltf-transform/cli copy "{}" "{}"

3) If 'copy' still fails (meshopt not decoded), use this Node script.
   Create decode_meshopt.mjs and run: node decode_meshopt.mjs

   // decode_meshopt.mjs
   import {{ NodeIO }} from '@gltf-transform/core';
   import {{ EXTMeshoptCompression }} from '@gltf-transform/extensions';
   import {{ MeshoptDecoder }} from 'meshoptimizer';

   await MeshoptDecoder.ready;
   const io = new NodeIO()
     .registerExtensions([EXTMeshoptCompression])
     .registerDependencies({{ 'meshopt.decoder': MeshoptDecoder }});
   const doc = await io.read(process.argv[2]);
   await io.write(process.argv[3], doc);
   console.log('Written:', process.argv[3]);

   Install: npm init -y && npm install @gltf-transform/core @gltf-transform/extensions meshoptimizer
   Run: node decode_meshopt.mjs input.glb output.glb
""".format(inp, out, inp, out))
    sys.exit(1)


if __name__ == "__main__":
    sys.exit(main() or 0)

Комментарии

Комментариев пока нет.