class DocumentMover
Moves documents between folders using the working upload mechanism
File: /tf/active/vicechatdev/e-ink-llm/cloudtest/test_move_from_trash.py
Lines: 52 - 520
Complexity: moderate
Purpose
Moves documents between folders using the working upload mechanism
Source Code
class DocumentMover:
    """Moves documents between folders using the working upload mechanism"""

    def __init__(self):
        # Load auth session
        auth = RemarkableAuth()
        self.session = auth.get_authenticated_session()
        if not self.session:
            raise RuntimeError("Failed to authenticate with reMarkable")
        print("🚀 Document Mover Initialized")

    def get_current_root_info(self):
        """Get current root.docSchema info using working method"""
        print("\n🔍 Step 1: Getting current root.docSchema...")

        # Get root info
        root_response = self.session.get("https://eu.tectonic.remarkable.com/sync/v4/root")
        root_response.raise_for_status()
        root_data = root_response.json()

        # Get root content
        root_content_response = self.session.get(f"https://eu.tectonic.remarkable.com/sync/v3/files/{root_data['hash']}")
        root_content_response.raise_for_status()
        root_content = root_content_response.text

        print(f"✅ Current root hash: {root_data['hash']}")
        print(f"✅ Current generation: {root_data.get('generation')}")
        print(f"✅ Root content size: {len(root_content)} bytes")
        return root_data, root_content
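
    # Editorial note: the 'generation' captured above is echoed back by
    # upload_new_root in Step 11. It appears to act as an optimistic-
    # concurrency token, letting the server reject the root swap if the
    # root changed in between (inferred from usage, not documented here).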

    def get_document_info(self, doc_uuid: str, root_content: str):
        """Find document entry in root.docSchema"""
        print(f"\n🔍 Step 2: Finding document {doc_uuid[:8]}... in root.docSchema")

        lines = root_content.strip().split('\n')
        for line in lines[1:]:  # Skip version header
            if doc_uuid in line:
                parts = line.split(':')
                if len(parts) >= 5:
                    doc_info = {
                        'hash': parts[0],
                        'uuid': parts[2],
                        'type': parts[3],
                        'size': parts[4],
                        'full_line': line
                    }
                    print(f"✅ Found document entry:")
                    print(f"   Hash: {doc_info['hash']}")
                    print(f"   Type: {doc_info['type']}")
                    print(f"   Size: {doc_info['size']}")
                    print(f"   Full line: {doc_info['full_line']}")
                    return doc_info

        raise ValueError(f"Document {doc_uuid} not found in root.docSchema")

    def get_document_schema(self, doc_hash: str):
        """Retrieve document's docSchema"""
        print(f"\n📄 Step 3: Retrieving document docSchema...")

        doc_response = self.session.get(f"https://eu.tectonic.remarkable.com/sync/v3/files/{doc_hash}")
        doc_response.raise_for_status()
        doc_content = doc_response.text

        print(f"✅ Document docSchema size: {len(doc_content)} bytes")
        print(f"📋 Document docSchema content:")
        lines = doc_content.strip().split('\n')
        for i, line in enumerate(lines):
            print(f"   Line {i}: {line}")
        return doc_content, lines

    def get_current_metadata(self, doc_lines: list):
        """Extract and fetch current metadata"""
        print(f"\n🔍 Step 4: Getting current metadata...")

        metadata_hash = None
        metadata_line = None

        # Find metadata component
        for line in doc_lines[1:]:  # Skip version
            if ':' in line and '.metadata' in line:
                parts = line.split(':')
                if len(parts) >= 5:
                    metadata_hash = parts[0]
                    metadata_line = line
                    break

        if not metadata_hash:
            raise ValueError("Metadata component not found in document schema")

        print(f"✅ Metadata hash: {metadata_hash}")

        # Fetch current metadata
        metadata_response = self.session.get(f"https://eu.tectonic.remarkable.com/sync/v3/files/{metadata_hash}")
        metadata_response.raise_for_status()
        current_metadata = json.loads(metadata_response.text)

        print(f"✅ Current metadata:")
        for key, value in current_metadata.items():
            print(f"   {key}: {value}")
        return current_metadata, metadata_line
def create_updated_metadata(self, current_metadata: dict, new_parent: str = ""):
"""Create updated metadata with new parent"""
print(f"\nš Step 5: Creating updated metadata...")
# Copy current metadata and update parent
updated_metadata = current_metadata.copy()
old_parent = updated_metadata.get('parent', '')
updated_metadata['parent'] = new_parent
print(f"ā
Updating parent: '{old_parent}' ā '{new_parent}'")
# Add/update source field to match real app documents (use macOS like real invoice)
updated_metadata['source'] = 'com.remarkable.macos' # Always set to match real invoice
print(f"ā
Setting 'source' field: com.remarkable.macos")
# Fix lastOpened to match real app behavior (use 0 for unopened)
if 'lastOpened' in updated_metadata and updated_metadata['lastOpened'] != 0:
updated_metadata['lastOpened'] = 0 # Real app uses 0 for unopened documents
print(f"ā
Setting lastOpened to 0 (real app behavior)")
# Make metadata match real app behavior (don't mark as modified for moves)
updated_metadata['lastModified'] = int(time.time() * 1000)
updated_metadata['metadatamodified'] = False # Real app doesn't mark as modified
updated_metadata['modified'] = False # Real app doesn't mark as modified
# Convert to JSON
updated_metadata_json = json.dumps(updated_metadata, separators=(',', ':'))
print(f"ā
Updated metadata ({len(updated_metadata_json)} bytes):")
print(f" {updated_metadata_json[:100]}...")
return updated_metadata_json

    def upload_new_metadata(self, metadata_json: str, doc_uuid: str):
        """Upload new metadata and return hash"""
        print(f"\n⬆️ Step 6: Uploading new metadata...")

        # Calculate hash
        metadata_hash = hashlib.sha256(metadata_json.encode()).hexdigest()
        print(f"✅ New metadata hash: {metadata_hash}")

        # Upload using working method from upload_manager.py
        headers = {
            'Content-Type': 'application/octet-stream',
            'rm-batch-number': '1',
            'rm-filename': f'{doc_uuid}.metadata',  # Required: UUID.metadata format
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',  # Use Windows UA
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }

        # Add CRC32C checksum
        crc32c_header = compute_crc32c_header(metadata_json.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header

        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{metadata_hash}",
            data=metadata_json.encode(),
            headers=headers
        )

        print(f"✅ Metadata upload response: {upload_response.status_code}")
        if upload_response.status_code not in [200, 202]:
            print(f"❌ Upload failed: {upload_response.text}")
            raise RuntimeError(f"Metadata upload failed: {upload_response.status_code}")

        return metadata_hash

    def upload_real_pagedata(self, doc_uuid: str):
        """Upload real pagedata (newline) to match real app documents"""
        print(f"\n⬆️ Step 6.5: Uploading real pagedata...")

        # Real app pagedata is just a newline character
        pagedata_content = "\n"
        pagedata_hash = hashlib.sha256(pagedata_content.encode()).hexdigest()
        print(f"✅ Real pagedata hash: {pagedata_hash}")
        print(f"✅ Real pagedata content: {repr(pagedata_content)} ({len(pagedata_content)} bytes)")

        # Upload pagedata using working method
        headers = {
            'Content-Type': 'application/octet-stream',
            'rm-batch-number': '1',
            'rm-filename': f'{doc_uuid}.pagedata',  # Required: UUID.pagedata format
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }

        # Add CRC32C checksum
        crc32c_header = compute_crc32c_header(pagedata_content.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header

        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{pagedata_hash}",
            data=pagedata_content.encode(),
            headers=headers
        )

        print(f"✅ Pagedata upload response: {upload_response.status_code}")
        if upload_response.status_code not in [200, 202]:
            print(f"❌ Upload failed: {upload_response.text}")
            raise RuntimeError(f"Pagedata upload failed: {upload_response.status_code}")

        return pagedata_hash

    def create_new_document_schema(self, doc_lines: list, new_metadata_hash: str, metadata_line: str, new_pagedata_hash: str = None):
        """Create new document schema with updated metadata hash and pagedata"""
        print(f"\n🏗️ Step 7: Creating new document schema...")

        # Replace metadata line and pagedata line with new hashes
        new_lines = []
        pagedata_line = None

        # Find pagedata line
        for line in doc_lines[1:]:  # Skip version
            if ':' in line and '.pagedata' in line:
                pagedata_line = line
                break

        for line in doc_lines:
            if line == metadata_line:
                # Replace metadata hash but keep size
                parts = line.split(':')
                parts[0] = new_metadata_hash  # Update hash
                new_line = ':'.join(parts)
                new_lines.append(new_line)
                print(f"✅ Updated metadata line:")
                print(f"   Old: {line}")
                print(f"   New: {new_line}")
            elif new_pagedata_hash and line == pagedata_line:
                # Replace pagedata hash and update size to 1 byte
                parts = line.split(':')
                parts[0] = new_pagedata_hash  # Update hash
                parts[4] = '1'  # Update size to 1 byte (newline)
                new_line = ':'.join(parts)
                new_lines.append(new_line)
                print(f"✅ Updated pagedata line:")
                print(f"   Old: {line}")
                print(f"   New: {new_line}")
            else:
                new_lines.append(line)

        new_doc_content = '\n'.join(new_lines)
        print(f"✅ New document schema ({len(new_doc_content)} bytes):")
        for i, line in enumerate(new_lines):
            print(f"   Line {i}: {line}")
        return new_doc_content

    def upload_new_document_schema(self, doc_content: str, doc_uuid: str):
        """Upload new document schema"""
        print(f"\n⬆️ Step 8: Uploading new document schema...")

        # Calculate hash
        doc_hash = hashlib.sha256(doc_content.encode()).hexdigest()
        print(f"✅ New document schema hash: {doc_hash}")

        # Upload using working method
        headers = {
            'Content-Type': 'application/octet-stream',
            'rm-batch-number': '1',
            'rm-filename': f'{doc_uuid}.docSchema',  # Required: UUID.docSchema format
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }

        # Add CRC32C checksum
        crc32c_header = compute_crc32c_header(doc_content.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header

        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{doc_hash}",
            data=doc_content.encode(),
            headers=headers
        )

        print(f"✅ Document schema upload response: {upload_response.status_code}")
        if upload_response.status_code not in [200, 202]:
            print(f"❌ Upload failed: {upload_response.text}")
            raise RuntimeError(f"Document schema upload failed: {upload_response.status_code}")

        return doc_hash

    def update_root_docschema(self, root_content: str, doc_info: dict, new_doc_hash: str):
        """Update root.docSchema with new document hash"""
        print(f"\n🔄 Step 9: Updating root.docSchema...")

        # Replace old document line with new hash
        old_line = doc_info['full_line']
        parts = old_line.split(':')
        parts[0] = new_doc_hash  # Update document hash
        new_line = ':'.join(parts)

        print(f"✅ Updating root.docSchema entry:")
        print(f"   Old: {old_line}")
        print(f"   New: {new_line}")

        # Replace in root content
        new_root_content = root_content.replace(old_line, new_line)
        print(f"✅ New root.docSchema size: {len(new_root_content)} bytes")
        return new_root_content

    def upload_new_root(self, root_content: str, generation: int):
        """Upload new root.docSchema and update roothash"""
        print(f"\n⬆️ Step 10: Uploading new root.docSchema...")

        # Calculate hash
        root_hash = hashlib.sha256(root_content.encode()).hexdigest()
        print(f"✅ New root hash: {root_hash}")

        # Upload root content using working method
        headers = {
            'Content-Type': 'text/plain',
            'rm-batch-number': '1',
            'rm-filename': 'root.docSchema',  # System filename for root.docSchema
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }

        # Add CRC32C checksum (from test_uploads.py method)
        crc32c_header = compute_crc32c_header(root_content.encode())
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header

        upload_response = self.session.put(
            f"https://eu.tectonic.remarkable.com/sync/v3/files/{root_hash}",
            data=root_content.encode(),
            headers=headers
        )

        print(f"✅ Root content upload response: {upload_response.status_code}")
        if upload_response.status_code not in [200, 202]:
            print(f"❌ Upload failed: {upload_response.text}")
            raise RuntimeError(f"Root content upload failed: {upload_response.status_code}")

        # Update root hash pointer using working method
        print(f"\n🔄 Step 11: Updating root hash pointer...")

        # Create root data exactly like working upload_manager.py
        root_update_data = {
            "broadcast": True,
            "generation": generation,  # Use generation parameter
            "hash": root_hash
        }

        # Convert to JSON with 2-space indent like real app
        root_content_body = json.dumps(root_update_data, indent=2).encode('utf-8')

        # Headers exactly like working upload_manager.py
        headers = {
            'Content-Type': 'application/json',
            'rm-batch-number': '1',
            'rm-filename': 'roothash',
            'rm-sync-id': str(uuid.uuid4()),
            'User-Agent': 'reMarkable-desktop-win/3.11.1.1951',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-BE,*',
            'Connection': 'Keep-Alive'
        }

        # Add CRC32C checksum
        crc32c_header = compute_crc32c_header(root_content_body)
        if crc32c_header:
            headers['x-goog-hash'] = crc32c_header

        # Use /sync/v3/root endpoint like working code
        root_update_response = self.session.put(
            "https://eu.tectonic.remarkable.com/sync/v3/root",
            data=root_content_body,
            headers=headers
        )

        print(f"✅ Root update response: {root_update_response.status_code}")
        if root_update_response.status_code not in [200, 202]:
            print(f"❌ Root update failed: {root_update_response.text}")
            raise RuntimeError(f"Root update failed: {root_update_response.status_code}")

        return root_hash

    def move_document_from_trash(self, doc_uuid: str):
        """Complete process to move a document into the gpt_in folder
        (works whether the document starts in trash, root, or another folder)"""
        print(f"🚀 Moving Document to gpt_in Folder")
        print(f"Document UUID: {doc_uuid}")
        print("=" * 60)

        try:
            # Step 1: Get current root info
            root_data, root_content = self.get_current_root_info()

            # Step 2: Find document in root
            doc_info = self.get_document_info(doc_uuid, root_content)

            # Step 3: Get document schema
            doc_content, doc_lines = self.get_document_schema(doc_info['hash'])

            # Step 4: Get current metadata
            current_metadata, metadata_line = self.get_current_metadata(doc_lines)

            # Check current parent and determine move action
            current_parent = current_metadata.get('parent', '')
            if current_parent == 'trash':
                print(f"📂 Document is in trash, moving to gpt_in folder...")
                target_parent = "99c6551f-2855-44cf-a4e4-c9c586558f42"  # gpt_in folder
                move_description = "from trash to gpt_in folder"
            elif current_parent == '':
                print(f"📂 Document is in root, moving to gpt_in folder...")
                target_parent = "99c6551f-2855-44cf-a4e4-c9c586558f42"  # gpt_in folder
                move_description = "from root to gpt_in folder"
            else:
                print(f"📂 Document is in folder '{current_parent}', moving to gpt_in folder...")
                target_parent = "99c6551f-2855-44cf-a4e4-c9c586558f42"  # gpt_in folder
                move_description = f"from folder '{current_parent}' to gpt_in folder"

            # Step 5: Create updated metadata (move to gpt_in folder)
            updated_metadata_json = self.create_updated_metadata(current_metadata, new_parent=target_parent)

            # Step 6: Upload new metadata
            new_metadata_hash = self.upload_new_metadata(updated_metadata_json, doc_uuid)

            # Step 6.5: Upload real pagedata to match real app
            new_pagedata_hash = self.upload_real_pagedata(doc_uuid)

            # Step 7: Create new document schema
            new_doc_content = self.create_new_document_schema(doc_lines, new_metadata_hash, metadata_line, new_pagedata_hash)

            # Step 8: Upload new document schema
            new_doc_hash = self.upload_new_document_schema(new_doc_content, doc_uuid)

            # Step 9: Update root.docSchema
            new_root_content = self.update_root_docschema(root_content, doc_info, new_doc_hash)

            # Step 10-11: Upload new root and update pointer
            new_root_hash = self.upload_new_root(new_root_content, root_data['generation'])

            print(f"\n🎉 SUCCESS! Document moved {move_description}")
            print(f"   Document: {current_metadata.get('visibleName')}")
            print(f"   Old parent: {current_parent or '(root)'}")
            print(f"   New parent: gpt_in ({target_parent})")
            print(f"   New root hash: {new_root_hash}")
            return True

        except Exception as e:
            print(f"\n❌ Move operation failed: {e}")
            return False
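
The schema manipulation in Steps 2, 7, and 9 all relies on the same colon-separated index format for root.docSchema and per-document docSchema entries. The source only reveals the field positions it actually reads (hash at index 0, UUID or component filename at index 2, type at index 3, size at index 4), so the sketch below is an inferred illustration of that line format, not an official specification; the second field is left uninterpreted because DocumentMover never touches it.

# Inferred parser for one docSchema index line (illustrative only).
# Field positions mirror what DocumentMover reads: parts[0]=hash,
# parts[2]=uuid/filename, parts[3]=type, parts[4]=size.
def parse_schema_line(line: str) -> dict:
    parts = line.split(':')
    if len(parts) < 5:
        raise ValueError(f"Unexpected schema line: {line!r}")
    return {
        'hash': parts[0],    # SHA-256 of the referenced file's content
        'field1': parts[1],  # not interpreted by DocumentMover
        'name': parts[2],    # document UUID or component filename
        'type': parts[3],
        'size': parts[4],
    }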
Parameters
| Name | Type | Default | Kind |
|---|---|---|---|
| (none) | - | - | - |
Parameter Details
The constructor takes no parameters. It obtains an authenticated session via RemarkableAuth and raises RuntimeError if authentication fails.
Return Value
DocumentMover() returns a ready-to-use instance; move_document_from_trash returns True on success and False on failure.
Class Interface
Methods
__init__(self)
Purpose: Initialize the mover and obtain an authenticated reMarkable session
Returns: None (raises RuntimeError if authentication fails)
get_current_root_info(self)
Purpose: Get current root.docSchema info using working method
Returns: Tuple of (root_data dict, root_content str)
get_document_info(self, doc_uuid, root_content)
Purpose: Find document entry in root.docSchema
Parameters:
doc_uuid: Type: str
root_content: Type: str
Returns: Dict with 'hash', 'uuid', 'type', 'size', and 'full_line' keys (raises ValueError if not found)
get_document_schema(self, doc_hash)
Purpose: Retrieve document's docSchema
Parameters:
doc_hash: Type: str
Returns: Tuple of (doc_content str, lines list)
get_current_metadata(self, doc_lines)
Purpose: Extract and fetch current metadata
Parameters:
doc_lines: Type: list
Returns: Tuple of (current_metadata dict, metadata_line str)
create_updated_metadata(self, current_metadata, new_parent)
Purpose: Create updated metadata with new parent
Parameters:
current_metadata: Type: dict
new_parent: Type: str (default "")
Returns: Updated metadata as a compact JSON string
upload_new_metadata(self, metadata_json, doc_uuid)
Purpose: Upload new metadata and return hash
Parameters:
metadata_json: Type: str
doc_uuid: Type: str
Returns: SHA-256 hash of the uploaded metadata (str)
upload_real_pagedata(self, doc_uuid)
Purpose: Upload real pagedata (newline) to match real app documents
Parameters:
doc_uuid: Type: str
Returns: SHA-256 hash of the uploaded pagedata (str)
create_new_document_schema(self, doc_lines, new_metadata_hash, metadata_line, new_pagedata_hash)
Purpose: Create new document schema with updated metadata hash and pagedata
Parameters:
doc_lines: Type: list
new_metadata_hash: Type: str
metadata_line: Type: str
new_pagedata_hash: Type: str (optional, default None)
Returns: New document schema content (str)
upload_new_document_schema(self, doc_content, doc_uuid)
Purpose: Upload new document schema
Parameters:
doc_content: Type: str
doc_uuid: Type: str
Returns: SHA-256 hash of the uploaded schema (str)
update_root_docschema(self, root_content, doc_info, new_doc_hash)
Purpose: Update root.docSchema with new document hash
Parameters:
root_content: Type: str
doc_info: Type: dict
new_doc_hash: Type: str
Returns: Updated root.docSchema content (str)
upload_new_root(self, root_content, generation)
Purpose: Upload new root.docSchema and update roothash
Parameters:
root_content: Type: str
generation: Type: int
Returns: New root hash (str)
move_document_from_trash(self, doc_uuid)
Purpose: Complete process to move a document into the gpt_in folder (from trash, root, or another folder)
Parameters:
doc_uuid: Type: str
Returns: bool (True on success, False on failure)
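
Steps 10 and 11 together implement a read-modify-write cycle on the account's root pointer: content is uploaded under its own SHA-256 hash, and the pointer swap echoes back the generation read in Step 1, which lets the server detect a concurrent change. A minimal sketch of that cycle, assuming an already-authenticated requests.Session and omitting the rm-* and x-goog-hash headers that upload_new_root sends:

import hashlib
import json

def swap_root(session, new_root_content: str) -> str:
    base = "https://eu.tectonic.remarkable.com"
    # 1. Read the current root to capture the generation (concurrency token)
    current = session.get(f"{base}/sync/v4/root").json()
    # 2. Content-addressed upload: the URL path is the SHA-256 of the body
    new_hash = hashlib.sha256(new_root_content.encode()).hexdigest()
    session.put(f"{base}/sync/v3/files/{new_hash}",
                data=new_root_content.encode()).raise_for_status()
    # 3. Swap the pointer, echoing the generation read in step 1
    body = json.dumps({"broadcast": True,
                       "generation": current["generation"],
                       "hash": new_hash}).encode()
    session.put(f"{base}/sync/v3/root", data=body,
                headers={"Content-Type": "application/json"}).raise_for_status()
    return new_hash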
Required Imports
import json
import time
import hashlib
import uuid
import base64
# Project-local dependencies (module paths not shown in this source):
# RemarkableAuth, compute_crc32c_header
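
The class also calls compute_crc32c_header, which is defined elsewhere in the project. Judging by the base64 import and the x-goog-hash header it feeds (Google Cloud Storage's checksum header, whose crc32c value is the base64 of the big-endian 4-byte CRC32C digest), a compatible implementation might look like this sketch, which assumes the google-crc32c package rather than whatever the project actually uses:

import base64
import struct
import google_crc32c

def compute_crc32c_header(data: bytes) -> str:
    # CRC32C of the payload, packed big-endian and base64-encoded,
    # matching the GCS 'x-goog-hash: crc32c=...' convention.
    crc = google_crc32c.value(data)
    return "crc32c=" + base64.b64encode(struct.pack(">I", crc)).decode("ascii")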
Usage Example
# Example usage:
# mover = DocumentMover()
# success = mover.move_document_from_trash("<document-uuid>")
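
A slightly fuller driver sketch; the UUID below is a placeholder, and running this performs real writes against whichever reMarkable cloud account RemarkableAuth is configured for:

if __name__ == "__main__":
    mover = DocumentMover()  # authenticates via RemarkableAuth
    ok = mover.move_document_from_trash("00000000-0000-0000-0000-000000000000")  # placeholder UUID
    raise SystemExit(0 if ok else 1)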
Similar Components
AI-powered semantic similarity - components with related functionality:
- class PylontechMover (76.4% similar)
- class DocumentToTrashMover (59.5% similar)
- function main_v86 (58.8% similar)
- class DocumentProcessor_v7 (55.9% similar)
- function main_v27 (54.1% similar)