Use blake3 instead of blake2 for a considerable performance increase
Based on some rough benchmarking performed on reasonably modern, but not over-the-top, laptop hardware (i7-8665U + PCIe3 NVMe SSD), this results in raw disk I/O (+ tar overhead) becoming the performance bottleneck instead of hashing rate.
This commit is contained in:
parent
3fb80a4394
commit
f90ac943cf
7
main.py
7
main.py
@@ -1,11 +1,11 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from sys import stdin
|
from sys import stdin
|
||||||
from sys import exit as sysexit
|
from sys import exit as sysexit
|
||||||
from hashlib import blake2b
|
|
||||||
from io import BytesIO
|
from io import BytesIO
|
||||||
from subprocess import Popen, PIPE
|
from subprocess import Popen, PIPE
|
||||||
|
|
||||||
import click
|
import click
|
||||||
|
from blake3 import blake3
|
||||||
|
|
||||||
from identicon import Identicon
|
from identicon import Identicon
|
||||||
|
|
||||||
@@ -86,10 +86,11 @@ def print_usage_and_exit():
|
|||||||
|
|
||||||
|
|
||||||
def get_digest(stream):
    """Hash the contents of *stream* with BLAKE3.

    The stream is consumed in BUF_SIZE chunks so arbitrarily large
    inputs are hashed without being held in memory all at once.

    Returns the digest as a bytes object of DIGEST_SIZE bytes.
    """
    # pylint: disable=not-callable
    hasher = blake3()
    # iter() with a b"" sentinel stops exactly where the walrus-while
    # form does: the first empty read at end of stream.
    for chunk in iter(lambda: stream.read(BUF_SIZE), b""):
        hasher.update(chunk)
    # blake3 produces an extendable output; request exactly DIGEST_SIZE
    # bytes to match the previous blake2b(digest_size=...) behaviour.
    return hasher.digest(length=DIGEST_SIZE)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user