"""Module for file-like access of blobs, usually invoked via Blob.open()."""

import io
import warnings

from google.api_core.exceptions import RequestRangeNotSatisfiable
from google.cloud.storage._helpers import _NUM_RETRIES_MESSAGE
from google.cloud.storage.retry import DEFAULT_RETRY
from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
from google.cloud.storage.retry import ConditionalRetryPolicy

# Resumable uploads require chunk sizes that are exact multiples of 256 KiB.
CHUNK_SIZE_MULTIPLE = 256 * 1024  # 256 KiB
DEFAULT_CHUNK_SIZE = 40 * 1024 * 1024  # 40 MiB

# Valid keyword arguments for download methods, also passed to blob.reload()
# if a reload is needed during seek().
VALID_DOWNLOAD_KWARGS = {
    "if_generation_match",
    "if_generation_not_match",
    "if_metageneration_match",
    "if_metageneration_not_match",
    "timeout",
    "retry",
    "raw_download",
}

# Valid keyword arguments for upload methods.
VALID_UPLOAD_KWARGS = {
    "content_type",
    "num_retries",
    "predefined_acl",
    "if_generation_match",
    "if_generation_not_match",
    "if_metageneration_match",
    "if_metageneration_not_match",
    "timeout",
    "checksum",
    "retry",
}


class BlobReader(io.BufferedIOBase):
    """A file-like object that reads from a blob.

    :type blob: 'google.cloud.storage.blob.Blob'
    :param blob:
        The blob to download.

    :type chunk_size: long
    :param chunk_size:
        (Optional) The minimum number of bytes to read at a time. If fewer
        bytes than the chunk_size are requested, the remainder is buffered.
        The default is the chunk_size of the blob, or 40MiB.

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. A None value will disable
        retries. A google.api_core.retry.Retry value will enable retries,
        and the object will define retriable response codes and errors and
        configure backoff and timeout options.

        A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
        Retry object and activates it only if certain conditions are met.
        This class exists to provide safe defaults for RPC calls that are
        not technically safe to retry normally (due to potential data
        duplication or other side-effects) but become safe to retry if a
        condition such as if_metageneration_match is set.

        See the retry.py source code and docstrings in this package
        (google.cloud.storage.retry) for information on retry types and how
        to configure them.

        Media operations (downloads and uploads) do not support non-default
        predicates in a Retry object. The default will always be used. Other
        configuration changes for Retry objects such as delays and deadlines
        are respected.

    :param download_kwargs:
        Keyword arguments to pass to the underlying API calls.
        The following arguments are supported:

        - ``if_generation_match``
        - ``if_generation_not_match``
        - ``if_metageneration_match``
        - ``if_metageneration_not_match``
        - ``timeout``

        Note that download_kwargs are also applied to blob.reload(), if a reload
        is needed during seek().
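
    Example:
        An illustrative sketch only (bucket and object names are placeholders);
        a ``BlobReader`` is normally obtained via ``Blob.open("rb")`` rather
        than constructed directly::

            from google.cloud import storage

            client = storage.Client()
            blob = client.bucket("my-bucket").blob("data.bin")
            with blob.open("rb", chunk_size=1024 * 1024) as reader:
                header = reader.read(16)    # buffered; fetches at least one chunk
                reader.seek(0)              # rewind within the blob
                everything = reader.read()  # read the rest of the blob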
    """

    def __init__(self, blob, chunk_size=None, retry=DEFAULT_RETRY, **download_kwargs):
        for kwarg in download_kwargs:
            if kwarg not in VALID_DOWNLOAD_KWARGS:
                raise ValueError(
                    f"BlobReader does not support keyword argument {kwarg}."
                )

        self._blob = blob
        self._pos = 0
        self._buffer = io.BytesIO()
        self._chunk_size = chunk_size or blob.chunk_size or DEFAULT_CHUNK_SIZE
        self._retry = retry
        self._download_kwargs = download_kwargs

    def read(self, size=-1):
        self._checkClosed()  # Raises ValueError if closed.

        result = self._buffer.read(size)
        # If the read request demands more bytes than are buffered, fetch more.
        remaining_size = size - len(result)
        if remaining_size > 0 or size < 0:
            self._pos += self._buffer.tell()
            read_size = len(result)
            self._buffer.seek(0)
            self._buffer.truncate(0)  # Clear the buffer to make way for new data.
            fetch_start = self._pos
            if size > 0:
                # Fetch the larger of self._chunk_size or the remaining_size.
                fetch_end = fetch_start + max(remaining_size, self._chunk_size)
            else:
                fetch_end = None

            # Download the requested byte range. Checksumming is disabled
            # because only part of the object is requested.
            try:
                result += self._blob.download_as_bytes(
                    start=fetch_start,
                    end=fetch_end,
                    checksum=None,
                    retry=self._retry,
                    **self._download_kwargs,
                )
            except RequestRangeNotSatisfiable:
                # We've reached the end of the file. Python file objects should
                # return an empty response in this case, not raise an error.
                pass

            # If more bytes were read than are immediately needed, buffer the
            # remainder and then trim the result.
            if size > 0 and len(result) > size:
                self._buffer.write(result[size:])
                self._buffer.seek(0)
                result = result[:size]
            # Increment the relative offset by the amount actually consumed.
            self._pos += len(result) - read_size

        return result

    def read1(self, size=-1):
        return self.read(size)

    def seek(self, pos, whence=0):
        """Seek within the blob.

        This implementation of seek() uses knowledge of the blob size to
        validate that the reported position does not exceed the blob last byte.
        If the blob size is not already known it will call blob.reload().
        """
        self._checkClosed()  # Raises ValueError if closed.

        if self._blob.size is None:
            self._blob.reload(**self._download_kwargs)

        initial_offset = self._pos + self._buffer.tell()

        if whence == 0:
            target_pos = pos
        elif whence == 1:
            target_pos = initial_offset + pos
        elif whence == 2:
            target_pos = self._blob.size + pos
        if whence not in {0, 1, 2}:
            raise ValueError("invalid whence value")

        if target_pos > self._blob.size:
            target_pos = self._blob.size

        # Seek or invalidate the buffer as needed.
        if target_pos < self._pos:
            # The target position precedes the buffered range, so the buffered
            # data is useless; discard it and move the true offset directly.
            self._buffer.seek(0)
            self._buffer.truncate(0)
            new_pos = target_pos
            self._pos = target_pos
        else:
            # The target position is at or beyond the start of the buffer, so
            # simply move the buffer's read position.
            difference = target_pos - initial_offset
            new_pos = self._pos + self._buffer.seek(difference, 1)
        return new_pos

    def close(self):
        self._buffer.close()

    @property
    def closed(self):
        return self._buffer.closed

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return True


class BlobWriter(io.BufferedIOBase):
    """A file-like object that writes to a blob.

    :type blob: 'google.cloud.storage.blob.Blob'
    :param blob:
        The blob to which to write.

    :type chunk_size: long
    :param chunk_size:
        (Optional) The maximum number of bytes to buffer before sending data
        to the server, and the size of each request when data is sent.
        Writes are implemented as a "resumable upload", so chunk_size for
        writes must be exactly a multiple of 256KiB as with other resumable
        uploads. The default is the chunk_size of the blob, or 40 MiB.

    :type text_mode: bool
    :param text_mode:
        (Deprecated) A synonym for ignore_flush. For backwards-compatibility,
        if True, sets ignore_flush to True. Use ignore_flush instead. This
        parameter will be removed in a future release.

    :type ignore_flush: bool
    :param ignore_flush:
        Makes flush() do nothing instead of raise an error. flush() without
        closing is not supported by the remote service and therefore calling it
        on this class normally results in io.UnsupportedOperation. However, that
        behavior is incompatible with some consumers and wrappers of file
        objects in Python, such as zipfile.ZipFile or io.TextIOWrapper. Setting
        ignore_flush will cause flush() to successfully do nothing, for
        compatibility with those contexts. The correct way to actually flush
        data to the remote server is to close() (using this object as a context
        manager is recommended).

    :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
    :param retry:
        (Optional) How to retry the RPC. A None value will disable
        retries. A google.api_core.retry.Retry value will enable retries,
        and the object will define retriable response codes and errors and
        configure backoff and timeout options.

        A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a
        Retry object and activates it only if certain conditions are met.
        This class exists to provide safe defaults for RPC calls that are
        not technically safe to retry normally (due to potential data
        duplication or other side-effects) but become safe to retry if a
        condition such as if_metageneration_match is set.

        See the retry.py source code and docstrings in this package
        (google.cloud.storage.retry) for information on retry types and how
        to configure them.

        Media operations (downloads and uploads) do not support non-default
        predicates in a Retry object. The default will always be used. Other
        configuration changes for Retry objects such as delays and deadlines
        are respected.

    :param upload_kwargs:
        Keyword arguments to pass to the underlying API
        calls. The following arguments are supported:

        - ``if_generation_match``
        - ``if_generation_not_match``
        - ``if_metageneration_match``
        - ``if_metageneration_not_match``
        - ``timeout``
        - ``content_type``
        - ``num_retries``
        - ``predefined_acl``
        - ``checksum``
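
    Example:
        An illustrative sketch only (bucket and object names are placeholders);
        a ``BlobWriter`` is normally obtained via ``Blob.open("wb")`` rather
        than constructed directly::

            from google.cloud import storage

            client = storage.Client()
            blob = client.bucket("my-bucket").blob("report.txt")
            with blob.open("wb", content_type="text/plain") as writer:
                writer.write(b"hello ")   # buffered until a full chunk is ready
                writer.write(b"world!")   # remaining bytes are sent on close()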
    """

    def __init__(
        self,
        blob,
        chunk_size=None,
        text_mode=False,
        ignore_flush=False,
        retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
        **upload_kwargs,
    ):
        for kwarg in upload_kwargs:
            if kwarg not in VALID_UPLOAD_KWARGS:
                raise ValueError(
                    f"BlobWriter does not support keyword argument {kwarg}."
                )

        self._blob = blob
        self._buffer = SlidingBuffer()
        self._upload_and_transport = None
        # Resumable uploads require a chunk size of a multiple of 256 KiB.
        # self._chunk_size must not be changed after the upload is initiated.
        self._chunk_size = chunk_size or blob.chunk_size or DEFAULT_CHUNK_SIZE
        # text_mode is a deprecated synonym for ignore_flush.
        self._ignore_flush = ignore_flush or text_mode
        self._retry = retry
        self._upload_kwargs = upload_kwargs

    @property
    def _chunk_size(self):
        """Get the blob's default chunk size.

        :rtype: int or ``NoneType``
        :returns: The current blob's chunk size, if it is set.
        """
        return self.__chunk_size

    @_chunk_size.setter
    def _chunk_size(self, value):
        """Set the blob's default chunk size.

        :type value: int
        :param value: (Optional) The current blob's chunk size, if it is set.

        :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
                 multiple of 256 KiB.
        """
        if value is not None and value > 0 and value % CHUNK_SIZE_MULTIPLE != 0:
            raise ValueError(
                "Chunk size must be a multiple of %d." % CHUNK_SIZE_MULTIPLE
            )
        self.__chunk_size = value

    def write(self, b):
        self._checkClosed()  # Raises ValueError if closed.

        pos = self._buffer.write(b)

        # If there is enough content, upload chunks.
        num_chunks = len(self._buffer) // self._chunk_size
        if num_chunks:
            self._upload_chunks_from_buffer(num_chunks)

        return pos

    def _initiate_upload(self):
        # num_retries is only supported for backwards-compatibility reasons
        # and is deprecated in favor of retry.
        num_retries = self._upload_kwargs.pop("num_retries", None)
        retry = self._retry
        content_type = self._upload_kwargs.pop("content_type", None)

        if num_retries is not None:
            warnings.warn(_NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2)
            # num_retries and retry are mutually exclusive. If num_retries is
            # set and retry is exactly the default, then nullify retry for
            # backwards compatibility.
            if retry is DEFAULT_RETRY_IF_GENERATION_SPECIFIED:
                retry = None

        # Handle ConditionalRetryPolicy.
        if isinstance(retry, ConditionalRetryPolicy):
            # Conditional retries are designed for non-media calls, which change
            # arguments into query_params dictionaries. Media operations work
            # differently, so here we make a fake query_params to feed to the
            # ConditionalRetryPolicy.
            query_params = {
                "ifGenerationMatch": self._upload_kwargs.get("if_generation_match"),
                "ifMetagenerationMatch": self._upload_kwargs.get(
                    "if_metageneration_match"
                ),
            }
            retry = retry.get_retry_policy_if_conditions_met(query_params=query_params)

        self._upload_and_transport = self._blob._initiate_resumable_upload(
            self._blob.bucket.client,
            self._buffer,
            content_type,
            None,
            num_retries,
            chunk_size=self._chunk_size,
            retry=retry,
            **self._upload_kwargs,
        )

    def _upload_chunks_from_buffer(self, num_chunks):
        """Upload a specified number of chunks."""

        # Initialize the upload if necessary.
        if not self._upload_and_transport:
            self._initiate_upload()

        upload, transport = self._upload_and_transport

        # Attach timeout if specified in the keyword arguments. Otherwise the
        # default timeout of the media library is used.
        kwargs = {}
        if "timeout" in self._upload_kwargs:
            kwargs = {"timeout": self._upload_kwargs.get("timeout")}

        # Upload chunks. The SlidingBuffer class manages the seek position.
        for _ in range(num_chunks):
            upload.transmit_next_chunk(transport, **kwargs)

        # Wipe the buffer of chunks uploaded, preserving any remaining data.
        self._buffer.flush()

    def tell(self):
        return self._buffer.tell() + len(self._buffer)

    def flush(self):
        # flush() is not fully supported by the remote service, so raise an
        # error here, unless self._ignore_flush is set.
        if not self._ignore_flush:
            raise io.UnsupportedOperation(
                "Cannot flush without finalizing upload. Use close() instead, "
                "or set ignore_flush=True when constructing this class (see "
                "docstring)."
            )

    def close(self):
        if not self._buffer.closed:
            self._upload_chunks_from_buffer(1)
        self._buffer.close()

    @property
    def closed(self):
        return self._buffer.closed

    def readable(self):
        return False

    def writable(self):
        return True

    def seekable(self):
        return False


class SlidingBuffer(object):
    """A non-rewindable buffer that frees memory of chunks already consumed.

    This class is necessary because `google-resumable-media-python` expects
    `tell()` to work relative to the start of the file, not relative to a place
    in an intermediate buffer. Using this class, we present an external
    interface with consistent seek and tell behavior without having to actually
    store bytes already sent.

    Behavior of this class differs from an ordinary BytesIO buffer. `write()`
    will always append to the end of the file only and not change the seek
    position otherwise. `flush()` will delete all data already read (data to the
    left of the seek position). `tell()` will report the seek position of the
    buffer including all deleted data. Additionally the class implements
    __len__() which will report the size of the actual underlying buffer.

    This class does not attempt to implement the entire Python I/O interface.
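
    Example:
        An illustrative sketch of the buffer semantics described above::

            buf = SlidingBuffer()
            buf.write(b"abcdef")       # appends without moving the read cursor
            assert buf.read(4) == b"abcd"
            buf.flush()                # frees the four bytes already read
            assert buf.tell() == 4     # tell() still reports the absolute offset
            assert len(buf) == 2       # only b"ef" remains in memory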
    """

    def __init__(self):
        self._buffer = io.BytesIO()
        self._cursor = 0

    def write(self, b):
        """Append to the end of the buffer without changing the position."""
        self._checkClosed()  # Raises ValueError if closed.

        bookmark = self._buffer.tell()
        self._buffer.seek(0, io.SEEK_END)
        pos = self._buffer.write(b)
        self._buffer.seek(bookmark)
        return self._cursor + pos

    def read(self, size=-1):
        """Read and move the cursor."""
        self._checkClosed()  # Raises ValueError if closed.

        data = self._buffer.read(size)
        self._cursor += len(data)
        return data

    def flush(self):
        """Delete already-read data (all data to the left of the position)."""
        self._checkClosed()  # Raises ValueError if closed.

        # BytesIO cannot be trimmed from the left, so save any unread data,
        # truncate the buffer, and write the unread data back at position 0.
        leftover = self._buffer.read()
        self._buffer.seek(0)
        self._buffer.truncate(0)
        self._buffer.write(leftover)
        self._buffer.seek(0)

    def tell(self):
        """Report how many bytes have been read from the buffer in total."""
        return self._cursor

    def seek(self, pos):
        """Seek to a position (backwards only) within the internal buffer.

        This implementation of seek() verifies that the seek destination is
        contained in _buffer. It will raise ValueError if the destination byte
        has already been purged from the buffer.

        The "whence" argument is not supported in this implementation.
        """
        self._checkClosed()  # Raises ValueError if closed.

        buffer_initial_pos = self._buffer.tell()
        difference = pos - self._cursor
        buffer_seek_result = self._buffer.seek(difference, io.SEEK_CUR)
        if (
            buffer_seek_result - buffer_initial_pos != difference
            or pos > self._cursor
        ):
            # The seek did not arrive at the requested byte: either it has
            # already been purged from the buffer, or it lies ahead of the
            # cursor. Restore the buffer position and report the error.
            self._buffer.seek(buffer_initial_pos)
            raise ValueError("Cannot seek() to that value.")

        self._cursor = pos
        return self._cursor

    def __len__(self):
        """Determine the size of the buffer by seeking to the end."""
        bookmark = self._buffer.tell()
        length = self._buffer.seek(0, io.SEEK_END)
        self._buffer.seek(bookmark)
        return length

    def close(self):
        return self._buffer.close()

    def _checkClosed(self):
        return self._buffer._checkClosed()

    @property
    def closed(self):
        return self._buffer.closed