import hashlib

from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex
from boto.glacier.utils import compute_hashes_from_fileobj


_ONE_MEGABYTE = 1024 * 1024


class _Partitioner(object):
    """Convert variable-size writes into part-sized writes

    Call write(data) with variable sized data as needed to write all data. Call
    flush() after all data is written.

    This instance will call send_fn(part_data) as needed in part_size pieces,
    except for the final part which may be shorter than part_size. Make sure to
    call flush() to ensure that a short final part results in a final send_fn
    call.

    """
    def __init__(self, part_size, send_fn):
        self.part_size = part_size
        self.send_fn = send_fn
        self._buffer = []
        self._buffer_size = 0

    def write(self, data):
        if data == b'':
            return
        self._buffer.append(data)
        self._buffer_size += len(data)
        while self._buffer_size > self.part_size:
            self._send_part()

    def _send_part(self):
        data = b''.join(self._buffer)
        # Put any data remaining over the part size back into the buffer
        if len(data) > self.part_size:
            self._buffer = [data[self.part_size:]]
            self._buffer_size = len(self._buffer[0])
        else:
            self._buffer = []
            self._buffer_size = 0
        # Send exactly one part
        part = data[:self.part_size]
        self.send_fn(part)

    def flush(self):
        if self._buffer_size > 0:
            self._send_part()
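
# Illustrative sketch of _Partitioner on its own; the _example_* helpers in
# this file are hypothetical usage sketches, not boto API. Arbitrary-sized
# writes are regrouped into part_size pieces, and flush() emits the short
# final piece.
def _example_partition(chunks, part_size=_ONE_MEGABYTE):
    parts = []
    partitioner = _Partitioner(part_size, parts.append)
    for chunk in chunks:        # chunks: an iterable of byte strings
        partitioner.write(chunk)
    partitioner.flush()         # emit the final, possibly short, part
    return parts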
d„ Z	dS )Ú	_Uploaderz‚Upload to a Glacier upload_id.

    Call upload_part for each part (in any order) and then close to complete
    the upload.

    c                 C   s4   || _ || _|| _|| _d | _d| _g | _d| _d S )Nr   F)ÚvaultÚ	upload_idr   Ú
chunk_sizeÚ
archive_idÚ_uploaded_sizeÚ_tree_hashesÚclosed©r   r    r!   r   r"   r   r   r   r   Y   s   
z_Uploader.__init__c                 C   s:   t | jƒ}||kr| j d g|| d  ¡ || j|< d S ©Né   )r   r%   Úextend)r   ÚindexÚraw_tree_hashÚlist_lengthr   r   r   Ú_insert_tree_hashe   s   
z_Uploader._insert_tree_hashc           	      C   sš   | j rtdƒ‚tt|| jƒƒ}|  ||¡ t|ƒ}t |¡ 	¡ }| j
| }||t|ƒ d f}| jj | jj| j||||¡}| ¡  |  jt|ƒ7  _dS )z©Upload a part to Glacier.

        :param part_index: part number where 0 is the first part
        :param part_data: data to upload corresponding to this part

        úI/O operation on closed filer)   N)r&   Ú
ValueErrorr   r   r"   r.   r   ÚhashlibÚsha256Ú	hexdigestr   r   r    Úlayer1Úupload_partÚnamer!   Úreadr$   )	r   Ú
part_indexÚ	part_dataÚpart_tree_hashÚhex_tree_hashÚlinear_hashÚstartÚcontent_rangeÚresponser   r   r   r5   k   s$   
ÿüz_Uploader.upload_partc                 C   s,   | j rtdƒ‚|  ||¡ |  j|7  _dS )aÃ  Skip uploading of a part.

        The final close call needs to calculate the tree hash and total size
        of all uploaded data, so this is the mechanism for resume
        functionality to provide it without actually uploading the data again.

        :param part_index: part number where 0 is the first part
        :param part_tree_hash: binary tree_hash of part being skipped
        :param part_length: length of part being skipped

        r/   N)r&   r0   r.   r$   )r   r8   r:   Úpart_lengthr   r   r   Ú	skip_part…   s   z_Uploader.skip_partc                 C   sZ   | j rd S d | jv rtdƒ‚tt| jƒƒ}| jj | jj| j	|| j
¡}|d | _d| _ d S )NzSome parts were not uploaded.Ú	ArchiveIdT)r&   r%   ÚRuntimeErrorr   r   r    r4   Úcomplete_multipart_uploadr6   r!   r$   r#   )r   r;   r?   r   r   r   Úclose–   s   
þ

z_Uploader.closeN)
r   r   r   r   Ú_ONE_MEGABYTEr   r.   r5   rA   rE   r   r   r   r   r   R   s    r   c                 c   s2    |   |¡}|r| d¡V  |   |¡}|sd S d S )Nzutf-8)r7   Úencode)Úfobjr   r   r   r   r   Úgenerate_parts_from_fobj¤   s   €

þrI   c           
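
# Illustrative (hypothetical) sketch of driving _Uploader directly, as the
# class docstring describes: parts may be uploaded in any order, and close()
# completes the multipart upload once every index has a tree hash.
def _example_upload_parts(vault, upload_id, part_size, parts):
    # `parts` is assumed to be a {part_index: part_bytes} mapping.
    uploader = _Uploader(vault, upload_id, part_size)
    for index, data in parts.items():
        uploader.upload_part(index, data)
    uploader.close()
    return uploader.archive_id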

def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
                       chunk_size=_ONE_MEGABYTE):
    """Resume upload of a file already part-uploaded to Glacier.

    The resumption of an upload where the part-uploaded section is empty is a
    valid degenerate case that this function can handle. In this case,
    part_hash_map should be an empty dict.

    :param vault: boto.glacier.vault.Vault object.
    :param upload_id: existing Glacier upload id of upload being resumed.
    :param part_size: part size of existing upload.
    :param fobj: file object containing local data to resume. This must read
        from the start of the entire upload, not just from the point being
        resumed. Use fobj.seek(0) to achieve this if necessary.
    :param part_hash_map: {part_index: part_tree_hash, ...} of data already
        uploaded. Each supplied part_tree_hash will be verified and the part
        re-uploaded if there is a mismatch.
    :param chunk_size: chunk size of tree hash calculation. This must be
        1 MiB for Amazon.

    """
    uploader = _Uploader(vault, upload_id, part_size, chunk_size)
    for part_index, part_data in enumerate(
            generate_parts_from_fobj(fobj, part_size)):
        part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size))
        if (part_index not in part_hash_map or
                part_hash_map[part_index] != part_tree_hash):
            uploader.upload_part(part_index, part_data)
        else:
            uploader.skip_part(part_index, part_tree_hash, len(part_data))
    uploader.close()
    return uploader.archive_id
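
# Illustrative (hypothetical) sketch of resuming: `saved_hashes` is assumed to
# be the {part_index: binary_tree_hash} mapping persisted from an interrupted
# attempt, and `fobj` a file object over the same local data.
def _example_resume(vault, upload_id, part_size, fobj, saved_hashes):
    fobj.seek(0)  # resume must re-read from the start of the entire upload
    return resume_file_upload(vault, upload_id, part_size, fobj, saved_hashes)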
d„ Z	e
dd„ ƒZe
dd„ ƒZe
dd„ ƒZe
dd„ ƒZdS )ÚWriterz‡
    Presents a file-like object for writing to a Amazon Glacier
    Archive. The data is written using the multi-part upload API.
    c                 C   s.   t ||||ƒ| _t|| jƒ| _d| _d| _d S )NFr   )r   rL   r   Ú_upload_partÚpartitionerr&   Únext_part_indexr'   r   r   r   r   Ò   s   
zWriter.__init__c                 C   s   | j rtdƒ‚| j |¡ d S )Nr/   )r&   r0   rP   r   r   r   r   r   r   Ø   s   zWriter.writec                 C   s"   | j  | j|¡ |  jd7  _d S r(   )rL   r5   rQ   )r   r9   r   r   r   rO   Ý   s   zWriter._upload_partc                 C   s(   | j rd S | j ¡  | j ¡  d| _ d S )NT)r&   rP   r   rL   rE   r   r   r   r   rE   á   s
   


zWriter.closec                 C   s   |   ¡  | jjS ©N)rE   rL   r#   r   r   r   r   Úget_archive_idè   s   zWriter.get_archive_idc                 C   s   t | jjƒS )z°
        Returns the current tree hash for the data that's been written
        **so far**.

        Only once the writing is complete is the final tree hash returned.
        )r   rL   r%   r   r   r   r   Úcurrent_tree_hashì   s   zWriter.current_tree_hashc                 C   ó   | j jS )z¸
        Returns the current uploaded size for the data that's been written
        **so far**.

        Only once the writing is complete is the final uploaded size returned.
        )rL   r$   r   r   r   r   Úcurrent_uploaded_sizeö   s   zWriter.current_uploaded_sizec                 C   rU   rR   )rL   r!   r   r   r   r   r!      ó   zWriter.upload_idc                 C   rU   rR   )rL   r    r   r   r   r   r      rW   zWriter.vaultN)r   r   r   r   rF   r   r   rO   rE   rS   ÚpropertyrT   rV   r!   r    r   r   r   r   rN   Í   s    
	
	
rN   )r1   Úboto.glacier.utilsr   r   r   r   rF   Úobjectr   r   rI   rM   rN   r   r   r   r   Ú<module>   s   -R
ÿ"
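
# Illustrative (hypothetical) sketch of the usual entry point: stream a local
# file through Writer. The vault is assumed to be a boto.glacier.vault.Vault
# and upload_id to come from an already-initiated multipart upload (in boto,
# Vault.create_archive_writer normally constructs the Writer this way).
def _example_write_archive(vault, upload_id, part_size, fobj):
    # fobj: a binary file object opened for reading.
    writer = Writer(vault, upload_id, part_size)
    for chunk in iter(lambda: fobj.read(part_size), b''):
        writer.write(chunk)
    writer.close()              # flushes the final short part, completes upload
    return writer.get_archive_id()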