gcc -o pgms/arithoh -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Darithoh src/arith.c gcc -o pgms/register -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Ddatum="register int" src/arith.c gcc -o pgms/short -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Ddatum=short src/arith.c gcc -o pgms/int -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Ddatum=int src/arith.c gcc -o pgms/long -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Ddatum=long src/arith.c gcc -o pgms/float -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Ddatum=float src/arith.c gcc -o pgms/double -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -Ddatum=double src/arith.c gcc -o pgms/hanoi -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/hanoi.c gcc -o pgms/syscall -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/syscall.c gcc -o pgms/context1 -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/context1.c gcc -o pgms/pipe -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/pipe.c gcc -o pgms/spawn -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/spawn.c gcc -o pgms/execl -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/execl.c gcc -o pgms/dhry2 -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -DHZ= ./src/dhry_1.c ./src/dhry_2.c gcc -o pgms/dhry2reg -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -DHZ= -DREG=register ./src/dhry_1.c ./src/dhry_2.c gcc -o pgms/looper -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/looper.c gcc -o pgms/fstime -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME src/fstime.c gcc -o 
pgms/whetstone-double -Wall -pedantic -O3 -ffast-math -march=native -mtune=native -I ./src -DTIME -DDP -DUNIX -DUNIXBENCH src/whets.c -lm make all make[1]: Entering directory "/root/serverscope-CbEq1g/byte-unixbench/UnixBench" make distr make[2]: Entering directory "/root/serverscope-CbEq1g/byte-unixbench/UnixBench" Checking distribution of files ./pgms exists ./src exists ./testdir exists make[2]: Leaving directory "/root/serverscope-CbEq1g/byte-unixbench/UnixBench" make programs make[2]: Entering directory "/root/serverscope-CbEq1g/byte-unixbench/UnixBench" make[2]: Nothing to be done for "programs". make[2]: Leaving directory "/root/serverscope-CbEq1g/byte-unixbench/UnixBench" make[1]: Leaving directory "/root/serverscope-CbEq1g/byte-unixbench/UnixBench" sh: 1: 3dinfo: not found # # # # # # # ##### ###### # # #### # # # # ## # # # # # # # ## # # # # # # # # # # # ## ##### ##### # # # # ###### # # # # # # ## # # # # # # # # # # # # ## # # # # # # # ## # # # # #### # # # # # ##### ###### # # #### # # Version 5.1.3 Based on the Byte Magazine Unix Benchmark Multi-CPU version Version 5 revisions by Ian Smith, Sunnyvale, CA, USA January 13, 2011 johantheghost at yahoo period com 1 x Dhrystone 2 using register variables 1 2 3 4 5 6 7 8 9 10 1 x Double-Precision Whetstone 1 2 3 4 5 6 7 8 9 10 1 x Execl Throughput 1 2 3 1 x File Copy 1024 bufsize 2000 maxblocks 1 2 3 1 x File Copy 256 bufsize 500 maxblocks 1 2 3 1 x File Copy 4096 bufsize 8000 maxblocks 1 2 3 1 x Pipe Throughput 1 2 3 4 5 6 7 8 9 10 1 x Pipe-based Context Switching 1 2 3 4 5 6 7 8 9 10 1 x Process Creation 1 2 3 1 x System Call Overhead 1 2 3 4 5 6 7 8 9 10 1 x Shell Scripts (1 concurrent) 1 2 3 1 x Shell Scripts (8 concurrent) 1 2 3 8 x Dhrystone 2 using register variables 1 2 3 4 5 6 7 8 9 10 8 x Double-Precision Whetstone 1 2 3 4 5 6 7 8 9 10 8 x Execl Throughput 1 2 3 8 x File Copy 1024 bufsize 2000 maxblocks 1 2 3 8 x File Copy 256 bufsize 500 maxblocks 1 2 3 8 x File Copy 4096 bufsize 8000 
maxblocks 1 2 3 8 x Pipe Throughput 1 2 3 4 5 6 7 8 9 10 8 x Pipe-based Context Switching 1 2 3 4 5 6 7 8 9 10 8 x Process Creation 1 2 3 8 x System Call Overhead 1 2 3 4 5 6 7 8 9 10 8 x Shell Scripts (1 concurrent) 1 2 3 8 x Shell Scripts (8 concurrent) 1 2 3 ======================================================================== BYTE UNIX Benchmarks (Version 5.1.3) System: v22019057140389136: GNU/Linux OS: GNU/Linux -- 4.9.0-0.bpo.9-amd64 -- #1 SMP Debian 4.9.168-1~deb8u1 (2019-04-28) Machine: x86_64 (unknown) Language: en_US.utf8 (charmap="UTF-8", collate="UTF-8") CPU 0: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 1: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 2: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 3: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 4: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 5: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 6: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET CPU 7: QEMU Virtual CPU version 2.5+ (4800.0 bogomips) Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET 14:51:07 up 26 min, 3 users, load average: 7.37, 4.53, 1.92; runlevel 2019-05-08 ------------------------------------------------------------------------ Benchmark Run: Wed May 08 2019 14:51:07 - 15:19:27 8 CPUs in system; running 1 parallel copy of tests Dhrystone 2 using register variables 33779602.7 
lps (10.0 s, 7 samples) Double-Precision Whetstone 4231.9 MWIPS (11.7 s, 7 samples) Execl Throughput 3130.5 lps (30.0 s, 2 samples) File Copy 1024 bufsize 2000 maxblocks 695629.3 KBps (30.0 s, 2 samples) File Copy 256 bufsize 500 maxblocks 188595.3 KBps (30.0 s, 2 samples) File Copy 4096 bufsize 8000 maxblocks 1315272.6 KBps (30.0 s, 2 samples) Pipe Throughput 1101790.1 lps (10.0 s, 7 samples) Pipe-based Context Switching 70126.9 lps (10.0 s, 7 samples) Process Creation 7855.6 lps (30.0 s, 2 samples) Shell Scripts (1 concurrent) 8566.4 lpm (60.0 s, 2 samples) Shell Scripts (8 concurrent) 4372.8 lpm (60.0 s, 2 samples) System Call Overhead 1009892.3 lps (10.0 s, 7 samples) System Benchmarks Index Values BASELINE RESULT INDEX Dhrystone 2 using register variables 116700.0 33779602.7 2894.6 Double-Precision Whetstone 55.0 4231.9 769.4 Execl Throughput 43.0 3130.5 728.0 File Copy 1024 bufsize 2000 maxblocks 3960.0 695629.3 1756.6 File Copy 256 bufsize 500 maxblocks 1655.0 188595.3 1139.5 File Copy 4096 bufsize 8000 maxblocks 5800.0 1315272.6 2267.7 Pipe Throughput 12440.0 1101790.1 885.7 Pipe-based Context Switching 4000.0 70126.9 175.3 Process Creation 126.0 7855.6 623.5 Shell Scripts (1 concurrent) 42.4 8566.4 2020.4 Shell Scripts (8 concurrent) 6.0 4372.8 7288.0 System Call Overhead 15000.0 1009892.3 673.3 ======== System Benchmarks Index Score 1176.9 ------------------------------------------------------------------------ Benchmark Run: Wed May 08 2019 15:19:27 - 15:47:45 8 CPUs in system; running 8 parallel copies of tests Dhrystone 2 using register variables 255739625.4 lps (10.0 s, 7 samples) Double-Precision Whetstone 36499.8 MWIPS (10.4 s, 7 samples) Execl Throughput 24788.5 lps (30.0 s, 2 samples) File Copy 1024 bufsize 2000 maxblocks 919755.2 KBps (30.0 s, 2 samples) File Copy 256 bufsize 500 maxblocks 255290.8 KBps (30.0 s, 2 samples) File Copy 4096 bufsize 8000 maxblocks 2357590.1 KBps (30.0 s, 2 samples) Pipe Throughput 8506609.8 lps (10.0 s, 7 samples) 
Pipe-based Context Switching 1075376.9 lps (10.0 s, 7 samples) Process Creation 54451.1 lps (30.0 s, 2 samples) Shell Scripts (1 concurrent) 49223.5 lpm (60.0 s, 2 samples) Shell Scripts (8 concurrent) 6233.9 lpm (60.0 s, 2 samples) System Call Overhead 5978501.6 lps (10.0 s, 7 samples) System Benchmarks Index Values BASELINE RESULT INDEX Dhrystone 2 using register variables 116700.0 255739625.4 21914.3 Double-Precision Whetstone 55.0 36499.8 6636.3 Execl Throughput 43.0 24788.5 5764.8 File Copy 1024 bufsize 2000 maxblocks 3960.0 919755.2 2322.6 File Copy 256 bufsize 500 maxblocks 1655.0 255290.8 1542.5 File Copy 4096 bufsize 8000 maxblocks 5800.0 2357590.1 4064.8 Pipe Throughput 12440.0 8506609.8 6838.1 Pipe-based Context Switching 4000.0 1075376.9 2688.4 Process Creation 126.0 54451.1 4321.5 Shell Scripts (1 concurrent) 42.4 49223.5 11609.3 Shell Scripts (8 concurrent) 6.0 6233.9 10389.8 System Call Overhead 15000.0 5978501.6 3985.7 ======== System Benchmarks Index Score 5277.0
dd if=/dev/zero of=benchmark bs=64K count=32K conv=fdatasync 32768+0 records in 32768+0 records out 2147483648 bytes (2.1 GB) copied, 5.93195 s, 362 MB/s dd if=/dev/zero of=benchmark bs=1M count=2048 conv=fdatasync 2048+0 records in 2048+0 records out 2147483648 bytes (2.1 GB) copied, 4.88756 s, 439 MB/s
./fio --time_based --name=benchmark --size=256M --runtime=60 --randrepeat=1 --iodepth=32 --invalidate=1 --verify=0 --verify_fatal=0 --numjobs=8 --rw=randread --blocksize=4k --group_reporting benchmark: (g=0): rw=randread, bs=4K-4K/4K-4K/4K-4K, ioengine=sync, iodepth=32 ... fio-2.8 Starting 8 processes benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: (groupid=0, jobs=8): err= 0: pid=18454: Wed May 8 14:48:04 2019 read : io=147640MB, bw=2460.5MB/s, iops=629876, runt= 60005msec clat (usec): min=0, max=1011.4K, avg=10.78, stdev=613.96 lat (usec): min=1, max=1011.4K, avg=11.12, stdev=613.96 clat percentiles (usec): | 1.00th=[ 1], 5.00th=[ 1], 10.00th=[ 1], 20.00th=[ 1], | 30.00th=[ 2], 40.00th=[ 2], 50.00th=[ 2], 60.00th=[ 2], | 70.00th=[ 2], 80.00th=[ 2], 90.00th=[ 2], 95.00th=[ 2], | 99.00th=[ 3], 99.50th=[ 116], 99.90th=[ 2064], 99.95th=[ 6240], | 99.99th=[10176] bw (KB /s): min= 7, max=1095856, per=12.72%, avg=320495.28, stdev=488734.67 lat (usec) : 2=23.35%, 4=75.71%, 10=0.09%, 20=0.13%, 50=0.03% lat (usec) : 100=0.11%, 250=0.41%, 500=0.05%, 750=0.01%, 1000=0.01% lat (msec) : 2=0.02%, 4=0.02%, 10=0.07%, 20=0.01%, 50=0.01% lat (msec) : 100=0.01%, 250=0.01%, 500=0.01%, 750=0.01%, 2000=0.01% cpu : usr=9.16%, sys=21.65%, ctx=261885, majf=0, minf=86 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued : total=r=37795760/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0 latency : target=0, window=0, percentile=100.00%, 
depth=32 Run status group 0 (all jobs): READ: io=147640MB, aggrb=2460.5MB/s, minb=2460.5MB/s, maxb=2460.5MB/s, mint=60005msec, maxt=60005msec Disk stats (read/write): sda: ios=261225/102, merge=0/324, ticks=333420/9688, in_queue=344260, util=100.00%
./fio --time_based --name=benchmark --size=256M --runtime=60 --randrepeat=1 --iodepth=32 --direct=1 --invalidate=1 --verify=0 --verify_fatal=0 --numjobs=8 --rw=randread --blocksize=4k --group_reporting benchmark: (g=0): rw=randread, bs=4K-4K/4K-4K/4K-4K, ioengine=sync, iodepth=32 ... fio-2.8 Starting 8 processes benchmark: (groupid=0, jobs=8): err= 0: pid=18466: Wed May 8 14:49:04 2019 read : io=3754.7MB, bw=64070KB/s, iops=16017, runt= 60008msec clat (usec): min=36, max=675119, avg=495.36, stdev=2193.59 lat (usec): min=36, max=675119, avg=495.83, stdev=2193.60 clat percentiles (usec): | 1.00th=[ 53], 5.00th=[ 60], 10.00th=[ 65], 20.00th=[ 75], | 30.00th=[ 86], 40.00th=[ 103], 50.00th=[ 137], 60.00th=[ 175], | 70.00th=[ 207], 80.00th=[ 342], 90.00th=[ 836], 95.00th=[ 1256], | 99.00th=[ 8032], 99.50th=[ 9152], 99.90th=[17792], 99.95th=[28032], | 99.99th=[75264] bw (KB /s): min= 145, max=21088, per=12.57%, avg=8052.23, stdev=8712.25 lat (usec) : 50=0.28%, 100=37.99%, 250=37.25%, 500=10.26%, 750=3.85% lat (usec) : 1000=1.04% lat (msec) : 2=5.58%, 4=0.91%, 10=2.50%, 20=0.26%, 50=0.06% lat (msec) : 100=0.02%, 250=0.01%, 500=0.01%, 750=0.01% cpu : usr=1.16%, sys=4.79%, ctx=961301, majf=0, minf=90 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued : total=r=961178/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0 latency : target=0, window=0, percentile=100.00%, depth=32 Run status group 0 (all jobs): READ: io=3754.7MB, aggrb=64069KB/s, minb=64069KB/s, maxb=64069KB/s, mint=60008msec, maxt=60008msec Disk stats (read/write): sda: ios=960572/40, merge=0/22, ticks=451960/0, in_queue=452256, util=99.97%
./fio --time_based --name=benchmark --size=256M --runtime=60 --filename=benchmark --randrepeat=1 --iodepth=32 --invalidate=1 --verify=0 --verify_fatal=0 --numjobs=8 --rw=randwrite --blocksize=4k --group_reporting benchmark: (g=0): rw=randwrite, bs=4K-4K/4K-4K/4K-4K, ioengine=sync, iodepth=32 ... fio-2.8 Starting 8 processes benchmark: (groupid=0, jobs=8): err= 0: pid=18497: Wed May 8 14:51:04 2019 write: io=85517MB, bw=1425.3MB/s, iops=364868, runt= 60001msec clat (usec): min=1, max=84321, avg=19.75, stdev=239.00 lat (usec): min=2, max=84327, avg=20.14, stdev=239.01 clat percentiles (usec): | 1.00th=[ 3], 5.00th=[ 4], 10.00th=[ 8], 20.00th=[ 11], | 30.00th=[ 12], 40.00th=[ 14], 50.00th=[ 16], 60.00th=[ 17], | 70.00th=[ 19], 80.00th=[ 21], 90.00th=[ 23], 95.00th=[ 26], | 99.00th=[ 34], 99.50th=[ 42], 99.90th=[ 171], 99.95th=[ 1208], | 99.99th=[11072] bw (KB /s): min=119000, max=262128, per=12.49%, avg=182340.99, stdev=27735.18 lat (usec) : 2=0.01%, 4=2.40%, 10=13.01%, 20=59.37%, 50=24.84% lat (usec) : 100=0.17%, 250=0.13%, 500=0.02%, 750=0.01%, 1000=0.01% lat (msec) : 2=0.01%, 4=0.01%, 10=0.02%, 20=0.01%, 50=0.01% lat (msec) : 100=0.01% cpu : usr=6.47%, sys=81.79%, ctx=5926969, majf=0, minf=94 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued : total=r=0/w=21892469/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0 latency : target=0, window=0, percentile=100.00%, depth=32 Run status group 0 (all jobs): WRITE: io=85517MB, aggrb=1425.3MB/s, minb=1425.3MB/s, maxb=1425.3MB/s, mint=60001msec, maxt=60001msec Disk stats (read/write): sda: ios=0/26022, merge=0/177, ticks=0/860460, in_queue=860448, util=10.49%
./fio --time_based --name=benchmark --size=256M --runtime=60 --filename=benchmark --randrepeat=1 --iodepth=32 --direct=1 --invalidate=1 --verify=0 --verify_fatal=0 --numjobs=8 --rw=randwrite --blocksize=4k --group_reporting benchmark: (g=0): rw=randwrite, bs=4K-4K/4K-4K/4K-4K, ioengine=sync, iodepth=32 ... fio-2.8 Starting 8 processes benchmark: Laying out IO file(s) (1 file(s) / 256MB) benchmark: (groupid=0, jobs=8): err= 0: pid=18479: Wed May 8 14:50:04 2019 write: io=2192.2MB, bw=37412KB/s, iops=9352, runt= 60001msec clat (usec): min=68, max=76039, avg=851.73, stdev=1818.01 lat (usec): min=69, max=76040, avg=852.29, stdev=1818.05 clat percentiles (usec): | 1.00th=[ 76], 5.00th=[ 78], 10.00th=[ 81], 20.00th=[ 89], | 30.00th=[ 103], 40.00th=[ 201], 50.00th=[ 716], 60.00th=[ 756], | 70.00th=[ 820], 80.00th=[ 940], 90.00th=[ 1560], 95.00th=[ 2608], | 99.00th=[ 6816], 99.50th=[10816], 99.90th=[25216], 99.95th=[32384], | 99.99th=[51456] bw (KB /s): min= 862, max=30920, per=12.51%, avg=4680.28, stdev=1843.25 lat (usec) : 100=27.83%, 250=12.62%, 500=0.20%, 750=18.03%, 1000=24.25% lat (msec) : 2=9.55%, 4=5.09%, 10=1.87%, 20=0.42%, 50=0.13% lat (msec) : 100=0.01% cpu : usr=0.61%, sys=4.82%, ctx=1122244, majf=0, minf=91 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued : total=r=0/w=561184/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0 latency : target=0, window=0, percentile=100.00%, depth=32 Run status group 0 (all jobs): WRITE: io=2192.2MB, aggrb=37411KB/s, minb=37411KB/s, maxb=37411KB/s, mint=60001msec, maxt=60001msec Disk stats (read/write): sda: ios=0/560882, merge=0/1797, ticks=0/47916, in_queue=47804, util=79.67%
Downloaded 104857600 bytes in 1.128 sec Downloaded 104857600 bytes in 1.169 sec Downloaded 104857600 bytes in 1.133 sec Downloaded 104857600 bytes in 1.171 sec Downloaded 104857600 bytes in 1.098 sec Finished! Average download speed is 701.88 Mbit/s
Retrieving speedtest.net configuration... Retrieving speedtest.net server list... Testing from SSP Europe GmbH ... Selecting 15 servers that are not too close: 1. goetel GmbH (Göttingen) [39.90 km]: 10.748 ms 2. Heuer & Sack GbR (Wernigerode) [107.53 km]: 17.454 ms 3. Newone (Ilmenau) [120.64 km]: 11.396 ms 4. Thueringer Netkom GmbH (Weimar) [132.12 km]: 10.879 ms 5. avrx (Limburg an der Lahn) [140.88 km]: 5.347 ms 6. Spacken.net (Hagen) [141.07 km]: 4.948 ms 7. Gemeindewerke Nümbrecht GmbH (Nümbrecht) [143.00 km]: 13.305 ms 8. fdcservers.net (Frankfurt) [143.90 km]: 6.17 ms 9. Vodafone Kabel Deutschland (Frankfurt) [143.90 km]: 5.221 ms 10. synch.cc (Frankfurt) [143.90 km]: 8.227 ms 11. 23media GmbH (Frankfurt) [143.90 km]: 5.038 ms 12. DEAC (Frankfurt) [143.90 km]: 33.12 ms 13. MK Netzdienste (Frankfurt) [143.90 km]: 5.334 ms 14. ITprosteer (Frankfurt) [143.90 km]: 7.001 ms 15. Gemnet LLC (Frankfurt) [143.90 km]: 24.427 ms Testing upload speeds 1. goetel GmbH (Göttingen): ......................... 460.44 Mbit/s 2. Heuer & Sack GbR (Wernigerode): ......................... 177.64 Mbit/s 3. Newone (Ilmenau): ......................... 254.51 Mbit/s 4. Thueringer Netkom GmbH (Weimar): ......................... 456.49 Mbit/s 5. avrx (Limburg an der Lahn): ......................... 516.54 Mbit/s 6. Spacken.net (Hagen): ......................... 519.80 Mbit/s 7. Gemeindewerke Nümbrecht GmbH (Nümbrecht): ......................... 344.37 Mbit/s 8. fdcservers.net (Frankfurt): ......................... 565.88 Mbit/s 9. Vodafone Kabel Deutschland (Frankfurt): ......................... 524.97 Mbit/s 10. synch.cc (Frankfurt): ......................... 412.23 Mbit/s 11. 23media GmbH (Frankfurt): ......................... 555.15 Mbit/s 12. DEAC (Frankfurt): ......................... 70.97 Mbit/s 13. MK Netzdienste (Frankfurt): ......................... 603.77 Mbit/s 14. ITprosteer (Frankfurt): ......................... 425.66 Mbit/s 15. 
Gemnet LLC (Frankfurt): ......................... 455.54 Mbit/s Average upload speed is 422.93 Mbit/s