#
tokens: 36608/50000 3/121 files (page 4/4)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 4 of 4. Use http://codebase.md/allenday/solr-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .flake8
├── .gitignore
├── CHANGELOG.md
├── CLAUDE.md
├── CONTRIBUTING.md
├── data
│   ├── bitcoin-whitepaper.json
│   ├── bitcoin-whitepaper.md
│   └── README.md
├── docker-compose.yml
├── LICENSE
├── poetry.lock
├── pyproject.toml
├── QUICKSTART.md
├── README.md
├── scripts
│   ├── check_solr.py
│   ├── create_test_collection.py
│   ├── create_unified_collection.py
│   ├── demo_hybrid_search.py
│   ├── demo_search.py
│   ├── diagnose_search.py
│   ├── direct_mcp_test.py
│   ├── format.py
│   ├── index_documents.py
│   ├── lint.py
│   ├── prepare_data.py
│   ├── process_markdown.py
│   ├── README.md
│   ├── setup.sh
│   ├── simple_index.py
│   ├── simple_mcp_test.py
│   ├── simple_search.py
│   ├── unified_index.py
│   ├── unified_search.py
│   ├── vector_index_simple.py
│   ├── vector_index.py
│   └── vector_search.py
├── solr_config
│   └── unified
│       └── conf
│           ├── schema.xml
│           ├── solrconfig.xml
│           ├── stopwords.txt
│           └── synonyms.txt
├── solr_mcp
│   ├── __init__.py
│   ├── server.py
│   ├── solr
│   │   ├── __init__.py
│   │   ├── client.py
│   │   ├── collections.py
│   │   ├── config.py
│   │   ├── constants.py
│   │   ├── exceptions.py
│   │   ├── interfaces.py
│   │   ├── query
│   │   │   ├── __init__.py
│   │   │   ├── builder.py
│   │   │   ├── executor.py
│   │   │   ├── parser.py
│   │   │   └── validator.py
│   │   ├── response.py
│   │   ├── schema
│   │   │   ├── __init__.py
│   │   │   ├── cache.py
│   │   │   └── fields.py
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   └── formatting.py
│   │   ├── vector
│   │   │   ├── __init__.py
│   │   │   ├── manager.py
│   │   │   └── results.py
│   │   └── zookeeper.py
│   ├── tools
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── solr_default_vectorizer.py
│   │   ├── solr_list_collections.py
│   │   ├── solr_list_fields.py
│   │   ├── solr_select.py
│   │   ├── solr_semantic_select.py
│   │   ├── solr_vector_select.py
│   │   └── tool_decorator.py
│   ├── utils.py
│   └── vector_provider
│       ├── __init__.py
│       ├── clients
│       │   ├── __init__.py
│       │   └── ollama.py
│       ├── constants.py
│       ├── exceptions.py
│       └── interfaces.py
├── solr.Dockerfile
└── tests
    ├── __init__.py
    ├── integration
    │   ├── __init__.py
    │   └── test_direct_solr.py
    └── unit
        ├── __init__.py
        ├── conftest.py
        ├── fixtures
        │   ├── __init__.py
        │   ├── common.py
        │   ├── config_fixtures.py
        │   ├── http_fixtures.py
        │   ├── server_fixtures.py
        │   ├── solr_fixtures.py
        │   ├── time_fixtures.py
        │   ├── vector_fixtures.py
        │   └── zookeeper_fixtures.py
        ├── solr
        │   ├── schema
        │   │   └── test_fields.py
        │   ├── test_client.py
        │   ├── test_config.py
        │   ├── utils
        │   │   └── test_formatting.py
        │   └── vector
        │       └── test_results.py
        ├── test_cache.py
        ├── test_client.py
        ├── test_config.py
        ├── test_formatting.py
        ├── test_interfaces.py
        ├── test_parser.py
        ├── test_query.py
        ├── test_schema.py
        ├── test_utils.py
        ├── test_validator.py
        ├── test_vector.py
        ├── test_zookeeper.py
        ├── tools
        │   ├── test_base.py
        │   ├── test_init.py
        │   ├── test_solr_default_vectorizer.py
        │   ├── test_solr_list_collections.py
        │   ├── test_solr_list_fields.py
        │   ├── test_tool_decorator.py
        │   └── test_tools.py
        └── vector_provider
            ├── test_constants.py
            ├── test_exceptions.py
            ├── test_interfaces.py
            └── test_ollama.py
```

# Files

--------------------------------------------------------------------------------
/solr_mcp/solr/schema/fields.py:
--------------------------------------------------------------------------------

```python
  1 | """Schema and field management for SolrCloud client."""
  2 | 
  3 | import json
  4 | import logging
  5 | from typing import Any, Dict, List, Optional
  6 | 
  7 | import aiohttp
  8 | import requests
  9 | from loguru import logger
 10 | from requests.exceptions import HTTPError, RequestException
 11 | 
 12 | from solr_mcp.solr.constants import FIELD_TYPE_MAPPING, SYNTHETIC_SORT_FIELDS
 13 | from solr_mcp.solr.exceptions import SchemaError, SolrError
 14 | from solr_mcp.solr.schema.cache import FieldCache
 15 | 
 16 | logger = logging.getLogger(__name__)
 17 | 
 18 | 
 19 | class FieldManager:
 20 |     """Manages Solr schema fields and field types."""
 21 | 
 22 |     def __init__(self, solr_base_url: str):
 23 |         """Initialize the field manager.
 24 | 
 25 |         Args:
 26 |             solr_base_url: Base URL for Solr instance
 27 |         """
 28 |         self.solr_base_url = (
 29 |             solr_base_url.rstrip("/")
 30 |             if isinstance(solr_base_url, str)
 31 |             else solr_base_url.config.solr_base_url.rstrip("/")
 32 |         )
 33 |         self._schema_cache = {}
 34 |         self._field_types_cache = {}
 35 |         self._vector_field_cache = {}
 36 |         self.cache = FieldCache()
 37 | 
 38 |     def get_schema(self, collection: str) -> Dict:
 39 |         """Get schema for a collection.
 40 | 
 41 |         Args:
 42 |             collection: Collection name
 43 | 
 44 |         Returns:
 45 |             Schema information
 46 | 
 47 |         Raises:
 48 |             SchemaError: If schema cannot be retrieved
 49 |         """
 50 |         if collection in self._schema_cache:
 51 |             return self._schema_cache[collection]
 52 | 
 53 |         try:
 54 |             # Try schema API first
 55 |             url = f"{self.solr_base_url}/{collection}/schema"
 56 |             response = requests.get(url)
 57 |             response.raise_for_status()
 58 |             schema = response.json()
 59 | 
 60 |             if "schema" not in schema:
 61 |                 raise SchemaError("Invalid schema response")
 62 | 
 63 |             self._schema_cache[collection] = schema["schema"]
 64 |             return schema["schema"]
 65 | 
 66 |         except HTTPError as e:
 67 |             if getattr(e.response, "status_code", None) == 404:
 68 |                 raise SchemaError(f"Collection not found: {collection}")
 69 |             raise SchemaError(f"Failed to get schema: {str(e)}")
 70 | 
 71 |         except Exception as e:
 72 |             logger.error(f"Error getting schema: {str(e)}")
 73 |             raise SchemaError(f"Failed to get schema: {str(e)}")
 74 | 
 75 |     def get_field_types(self, collection: str) -> Dict[str, str]:
 76 |         """Get field types for a collection."""
 77 |         if collection in self._field_types_cache:
 78 |             return self._field_types_cache[collection]
 79 | 
 80 |         schema = self.get_schema(collection)
 81 |         field_types = {}
 82 | 
 83 |         # First map field type names to their definitions
 84 |         for field_type in schema.get("fieldTypes", []):
 85 |             field_types[field_type["name"]] = field_type["name"]
 86 | 
 87 |         # Then map fields to their types
 88 |         for field in schema.get("fields", []):
 89 |             if "name" in field and "type" in field:
 90 |                 field_types[field["name"]] = field["type"]
 91 | 
 92 |         self._field_types_cache[collection] = field_types
 93 |         return field_types
 94 | 
 95 |     def get_field_type(self, collection: str, field_name: str) -> str:
 96 |         """Get field type for a specific field."""
 97 |         field_types = self.get_field_types(collection)
 98 |         if field_name not in field_types:
 99 |             raise SchemaError(f"Field not found: {field_name}")
100 |         return field_types[field_name]
101 | 
102 |     def validate_field_exists(self, field: str, collection: str) -> bool:
103 |         """Validate that a field exists in a collection.
104 | 
105 |         Args:
106 |             field: Field name to validate
107 |             collection: Collection name
108 | 
109 |         Returns:
110 |             True if field exists
111 | 
112 |         Raises:
113 |             SchemaError: If field does not exist
114 |         """
115 |         try:
116 |             # Handle wildcard field
117 |             if field == "*":
118 |                 return True
119 | 
120 |             field_info = self.get_field_info(collection)
121 |             if field not in field_info["searchable_fields"]:
122 |                 raise SchemaError(f"Field {field} not found in collection {collection}")
123 | 
124 |             return True
125 | 
126 |         except SchemaError:
127 |             raise
128 |         except Exception as e:
129 |             logger.error(f"Error validating field {field}: {str(e)}")
130 |             raise SchemaError(f"Error validating field {field}: {str(e)}")
131 | 
132 |     def validate_sort_field(self, field: str, collection: str) -> bool:
133 |         """Validate that a field can be used for sorting.
134 | 
135 |         Args:
136 |             field: Field name to validate
137 |             collection: Collection name
138 | 
139 |         Returns:
140 |             True if field is sortable
141 | 
142 |         Raises:
143 |             SchemaError: If field is not sortable
144 |         """
145 |         try:
146 |             field_info = self.get_field_info(collection)
147 |             if field not in field_info["sortable_fields"]:
148 |                 raise SchemaError(
149 |                     f"Field {field} is not sortable in collection {collection}"
150 |                 )
151 | 
152 |             return True
153 | 
154 |         except SchemaError:
155 |             raise
156 |         except Exception as e:
157 |             logger.error(f"Error validating sort field {field}: {str(e)}")
158 |             raise SchemaError(f"Error validating sort field {field}: {str(e)}")
159 | 
160 |     def get_field_info(
161 |         self, collection: str, field: Optional[str] = None
162 |     ) -> Dict[str, Any]:
163 |         """Get field information for a collection.
164 | 
165 |         Args:
166 |             collection: Collection name
167 |             field: Optional field name to get specific info for
168 | 
169 |         Returns:
170 |             Field information including searchable and sortable fields
171 | 
172 |         Raises:
173 |             SchemaError: If field info cannot be retrieved
174 |         """
175 |         try:
176 |             schema = self.get_schema(collection)
177 | 
178 |             # Get all fields
179 |             fields = schema.get("fields", [])
180 | 
181 |             # Build field info
182 |             searchable_fields = []
183 |             sortable_fields = {}
184 | 
185 |             for field_def in fields:
186 |                 name = field_def.get("name")
187 |                 if not name:
188 |                     continue
189 | 
190 |                 # Check if field is searchable
191 |                 if field_def.get("indexed", True):
192 |                     searchable_fields.append(name)
193 | 
194 |                 # Check if field is sortable
195 |                 if field_def.get("docValues", False) or field_def.get("stored", False):
196 |                     sortable_fields[name] = {
197 |                         "type": field_def.get("type", "string"),
198 |                         "searchable": field_def.get("indexed", True),
199 |                         "directions": ["asc", "desc"],
200 |                         "default_direction": "asc",
201 |                     }
202 | 
203 |             # Add special fields
204 |             sortable_fields["_docid_"] = {
205 |                 "type": "numeric",
206 |                 "searchable": False,
207 |                 "directions": ["asc", "desc"],
208 |                 "default_direction": "asc",
209 |             }
210 |             sortable_fields["score"] = {
211 |                 "type": "numeric",
212 |                 "searchable": True,
213 |                 "directions": ["asc", "desc"],
214 |                 "default_direction": "desc",
215 |             }
216 | 
217 |             field_info = {
218 |                 "searchable_fields": searchable_fields,
219 |                 "sortable_fields": sortable_fields,
220 |             }
221 | 
222 |             if field:
223 |                 if field in sortable_fields:
224 |                     return sortable_fields[field]
225 |                 raise SchemaError(f"Field {field} not found in collection {collection}")
226 | 
227 |             return field_info
228 | 
229 |         except SchemaError:
230 |             raise
231 |         except Exception as e:
232 |             logger.error(f"Error getting field info: {str(e)}")
233 |             raise SchemaError(f"Failed to get field info: {str(e)}")
234 | 
235 |     def validate_collection(self, collection: str) -> bool:
236 |         """Validate that a collection exists.
237 | 
238 |         Args:
239 |             collection: Collection name to validate
240 | 
241 |         Returns:
242 |             True if collection exists
243 | 
244 |         Raises:
245 |             SchemaError: If collection does not exist
246 |         """
247 |         try:
248 |             self.get_schema(collection)
249 |             return True
250 | 
251 |         except Exception as e:
252 |             logger.error(f"Error validating collection {collection}: {str(e)}")
253 |             raise SchemaError(f"Collection {collection} does not exist: {str(e)}")
254 | 
255 |     def clear_cache(self, collection: Optional[str] = None):
256 |         """Clear schema cache.
257 | 
258 |         Args:
259 |             collection: Optional collection name to clear cache for. If None, clears all cache.
260 |         """
261 |         if collection:
262 |             self._schema_cache.pop(collection, None)
263 |             self._field_types_cache.pop(collection, None)
264 |         else:
265 |             self._schema_cache = {}
266 |             self._field_types_cache = {}
267 | 
268 |     def _get_collection_fields(self, collection: str) -> Dict[str, Any]:
269 |         """Get or load field information for a collection.
270 | 
271 |         Args:
272 |             collection: Collection name
273 | 
274 |         Returns:
275 |             Dict containing searchable and sortable fields for the collection
276 |         """
277 |         # Check cache first
278 |         if not self.cache.is_stale(collection):
279 |             return self.cache.get(collection)
280 | 
281 |         try:
282 |             searchable_fields = self._get_searchable_fields(collection)
283 |             sortable_fields = self._get_sortable_fields(collection)
284 | 
285 |             field_info = {
286 |                 "searchable_fields": searchable_fields,
287 |                 "sortable_fields": sortable_fields,
288 |             }
289 | 
290 |             # Update cache
291 |             self.cache.set(collection, field_info)
292 | 
293 |             logger.info(f"Loaded field information for collection {collection}")
294 |             logger.debug(f"Searchable fields: {searchable_fields}")
295 |             logger.debug(f"Sortable fields: {sortable_fields}")
296 | 
297 |             return field_info
298 | 
299 |         except Exception as e:
300 |             logger.error(
301 |                 f"Error loading field information for collection {collection}: {e}"
302 |             )
303 |             # Use cached defaults
304 |             return self.cache.get_or_default(collection)
305 | 
306 |     def _get_searchable_fields(self, collection: str) -> List[str]:
307 |         """Get list of searchable fields for a collection.
308 | 
309 |         Args:
310 |             collection: Collection name
311 | 
312 |         Returns:
313 |             List of field names that can be searched
314 |         """
315 |         try:
316 |             # Try schema API first
317 |             schema_url = f"{collection}/schema/fields?wt=json"
318 |             logger.debug(f"Getting searchable fields from schema URL: {schema_url}")
319 |             full_url = f"{self.solr_base_url}/{schema_url}"
320 |             logger.debug(f"Full URL: {full_url}")
321 | 
322 |             response = requests.get(full_url)
323 |             fields_data = response.json()
324 | 
325 |             searchable_fields = []
326 |             for field in fields_data.get("fields", []):
327 |                 field_name = field.get("name")
328 |                 field_type = field.get("type")
329 | 
330 |                 # Skip special fields
331 |                 if field_name.startswith("_") and field_name not in ["_text_"]:
332 |                     continue
333 | 
334 |                 # Add text and string fields
335 |                 if field_type in ["text_general", "string"] or "text" in field_type:
336 |                     logger.debug(
337 |                         f"Found searchable field: {field_name}, type: {field_type}"
338 |                     )
339 |                     searchable_fields.append(field_name)
340 | 
341 |             # Add known content fields
342 |             content_fields = ["content", "title", "_text_"]
343 |             for field in content_fields:
344 |                 if field not in searchable_fields:
345 |                     searchable_fields.append(field)
346 | 
347 |             logger.info(
348 |                 f"Using searchable fields for collection {collection}: {searchable_fields}"
349 |             )
350 |             return searchable_fields
351 | 
352 |         except Exception as e:
353 |             logger.warning(f"Error getting schema fields: {str(e)}")
354 |             logger.info(
355 |                 "Fallback: trying direct URL with query that returns field info"
356 |             )
357 | 
358 |             try:
359 |                 direct_url = (
360 |                     f"{self.solr_base_url}/{collection}/select?q=*:*&rows=0&wt=json"
361 |                 )
362 |                 logger.debug(f"Trying direct URL: {direct_url}")
363 | 
364 |                 response = requests.get(direct_url)
365 |                 response_data = response.json()
366 | 
367 |                 # Extract fields from response header
368 |                 fields = []
369 |                 if "responseHeader" in response_data:
370 |                     header = response_data["responseHeader"]
371 |                     if "params" in header and "fl" in header["params"]:
372 |                         fields = header["params"]["fl"].split(",")
373 | 
374 |                 # Add known searchable fields
375 |                 fields.extend(["content", "title", "_text_"])
376 |                 searchable_fields = list(set(fields))  # Remove duplicates
377 | 
378 |             except Exception as e2:
379 |                 logger.error(f"Error getting searchable fields: {str(e2)}")
380 |                 logger.info(
381 |                     "Using fallback searchable fields: ['content', 'title', '_text_']"
382 |                 )
383 |                 searchable_fields = ["content", "title", "_text_"]
384 | 
385 |             logger.info(
386 |                 f"Using searchable fields for collection {collection}: {searchable_fields}"
387 |             )
388 |             return searchable_fields
389 | 
390 |     def _get_sortable_fields(self, collection: str) -> Dict[str, Dict[str, Any]]:
391 |         """Get list of sortable fields and their properties for a collection.
392 | 
393 |         Args:
394 |             collection: Collection name
395 | 
396 |         Returns:
397 |             Dict mapping field names to their properties
398 |         """
399 |         try:
400 |             # Try schema API first
401 |             schema_url = f"{collection}/schema/fields?wt=json"
402 |             logger.debug(f"Getting sortable fields from schema URL: {schema_url}")
403 |             full_url = f"{self.solr_base_url}/{schema_url}"
404 |             logger.debug(f"Full URL: {full_url}")
405 | 
406 |             response = requests.get(full_url)
407 |             fields_data = response.json()
408 | 
409 |             sortable_fields = {}
410 | 
411 |             # Process schema fields
412 |             for field in fields_data.get("fields", []):
413 |                 field_name = field.get("name")
414 |                 field_type = field.get("type")
415 |                 multi_valued = field.get("multiValued", False)
416 |                 doc_values = field.get("docValues", False)
417 | 
418 |                 # Skip special fields, multi-valued fields, and fields without a recognized type
419 |                 if (
420 |                     (
421 |                         field_name.startswith("_")
422 |                         and field_name not in SYNTHETIC_SORT_FIELDS
423 |                     )
424 |                     or multi_valued
425 |                     or field_type not in FIELD_TYPE_MAPPING
426 |                 ):
427 |                     continue
428 | 
429 |                 # Add field to sortable fields
430 |                 sortable_fields[field_name] = {
431 |                     "type": FIELD_TYPE_MAPPING[field_type],
432 |                     "directions": ["asc", "desc"],
433 |                     "default_direction": (
434 |                         "asc"
435 |                         if FIELD_TYPE_MAPPING[field_type]
436 |                         in ["string", "numeric", "date"]
437 |                         else "desc"
438 |                     ),
439 |                     "searchable": True,  # Regular schema fields are searchable
440 |                 }
441 | 
442 |             # Add synthetic fields
443 |             sortable_fields.update(SYNTHETIC_SORT_FIELDS)
444 | 
445 |             return sortable_fields
446 | 
447 |         except Exception as e:
448 |             logger.error(f"Error getting sortable fields: {e}")
449 |             # Return only the guaranteed score field
450 |             return {"score": SYNTHETIC_SORT_FIELDS["score"]}
451 | 
452 |     def validate_fields(self, collection: str, fields: List[str]) -> None:
453 |         """Validate that the requested fields exist in the collection.
454 | 
455 |         Args:
456 |             collection: Collection name
457 |             fields: List of field names to validate
458 | 
459 |         Raises:
460 |             SchemaError: If any field is not valid for the collection
461 |         """
462 |         collection_info = self._get_collection_fields(collection)
463 |         searchable_fields = collection_info["searchable_fields"]
464 |         sortable_fields = collection_info["sortable_fields"]
465 | 
466 |         # Combine all valid fields
467 |         valid_fields = set(searchable_fields) | set(sortable_fields.keys())
468 | 
469 |         # Check each requested field
470 |         invalid_fields = [f for f in fields if f not in valid_fields]
471 |         if invalid_fields:
472 |             raise SchemaError(
473 |                 f"Invalid fields for collection {collection}: {', '.join(invalid_fields)}"
474 |             )
475 | 
476 |     def validate_sort_fields(self, collection: str, sort_fields: List[str]) -> None:
477 |         """Validate that the requested sort fields are sortable in the collection.
478 | 
479 |         Args:
480 |             collection: Collection name
481 |             sort_fields: List of field names to validate for sorting
482 | 
483 |         Raises:
484 |             SchemaError: If any field is not sortable in the collection
485 |         """
486 |         collection_info = self._get_collection_fields(collection)
487 |         sortable_fields = collection_info["sortable_fields"]
488 | 
489 |         # Check each sort field
490 |         invalid_fields = [f for f in sort_fields if f not in sortable_fields]
491 |         if invalid_fields:
492 |             raise SchemaError(
493 |                 f"Fields not sortable in collection {collection}: {', '.join(invalid_fields)}"
494 |             )
495 | 
496 |     def validate_collection_exists(self, collection: str) -> bool:
497 |         """Validate that a collection exists.
498 | 
499 |         Args:
500 |             collection: Collection name
501 | 
502 |         Returns:
503 |             True if collection exists
504 | 
505 |         Raises:
506 |             SchemaError: If collection does not exist
507 |         """
508 |         try:
509 |             self.get_schema(collection)
510 |             return True
511 | 
512 |         except SchemaError as e:
513 |             if "Collection not found" in str(e):
514 |                 raise
515 |             logger.error(f"Error validating collection: {str(e)}")
516 |             raise SchemaError(f"Error validating collection: {str(e)}")
517 | 
518 |         except Exception as e:
519 |             logger.error(f"Error validating collection: {str(e)}")
520 |             raise SchemaError(f"Error validating collection: {str(e)}")
521 | 
522 |     async def list_fields(self, collection: str) -> List[Dict[str, Any]]:
523 |         """List all fields in a collection with their properties.
524 | 
525 |         Args:
526 |             collection: Collection name
527 | 
528 |         Returns:
529 |             List of field dictionaries with their properties
530 | 
531 |         Raises:
532 |             SchemaError: If fields cannot be retrieved
533 |         """
534 |         try:
535 |             # Verify collection exists
536 |             schema = self.get_schema(collection)
537 | 
538 |             # Get schema fields and copyFields
539 |             fields = schema.get("fields", [])
540 |             copy_fields = schema.get("copyFields", [])
541 | 
542 |             # Build map of destination fields to their source fields
543 |             copies_from = {}
544 |             for copy_field in copy_fields:
545 |                 dest = copy_field.get("dest")
546 |                 source = copy_field.get("source")
547 |                 if not dest or not source:
548 |                     continue
549 |                 if dest not in copies_from:
550 |                     copies_from[dest] = []
551 |                 copies_from[dest].append(source)
552 | 
553 |             # Add copyField information to field properties
554 |             for field in fields:
555 |                 if field.get("name") in copies_from:
556 |                     field["copies_from"] = copies_from[field["name"]]
557 | 
558 |             return fields
559 | 
560 |         except SchemaError:
561 |             raise
562 |         except Exception as e:
563 |             raise SchemaError(
564 |                 f"Failed to list fields for collection '{collection}': {str(e)}"
565 |             )
566 | 
567 |     async def find_vector_field(self, collection: str) -> str:
568 |         """Find the first vector field in a collection.
569 | 
570 |         Args:
571 |             collection: Collection name
572 | 
573 |         Returns:
574 |             Name of the first vector field found
575 | 
576 |         Raises:
577 |             SchemaError: If no vector fields found
578 |         """
579 |         try:
580 |             fields = await self.list_fields(collection)
581 | 
582 |             # Look for vector fields
583 |             vector_fields = [
584 |                 f
585 |                 for f in fields
586 |                 if f.get("type") in ["dense_vector", "knn_vector"]
587 |                 or f.get("class") == "solr.DenseVectorField"
588 |             ]
589 | 
590 |             if not vector_fields:
591 |                 raise SchemaError(
592 |                     f"No vector fields found in collection '{collection}'"
593 |                 )
594 | 
595 |             field = vector_fields[0]["name"]
596 |             logger.info(f"Using auto-detected vector field: {field}")
597 |             return field
598 | 
599 |         except SchemaError:
600 |             raise
601 |         except Exception as e:
602 |             raise SchemaError(
603 |                 f"Failed to find vector field in collection '{collection}': {str(e)}"
604 |             )
605 | 
606 |     async def validate_vector_field_dimension(
607 |         self,
608 |         collection: str,
609 |         field: str,
610 |         vector_provider_model: Optional[str] = None,
611 |         model_dimensions: Optional[Dict[str, int]] = None,
612 |     ) -> Dict[str, Any]:
613 |         """Validate that the vector field exists and its dimension matches the vectorizer.
614 | 
615 |         Args:
616 |             collection: Collection name
617 |             field: Field name to validate
618 |             vector_provider_model: Optional vectorizer model name
619 |             model_dimensions: Dictionary mapping model names to dimensions
620 | 
621 |         Returns:
622 |             Field information dictionary
623 | 
624 |         Raises:
625 |             SchemaError: If validation fails
626 |         """
627 |         # Check cache first
628 |         cache_key = f"{collection}:{field}"
629 |         if cache_key in self._vector_field_cache:
630 |             field_info = self._vector_field_cache[cache_key]
631 |             logger.debug(f"Using cached field info for {cache_key}")
632 |             return field_info
633 | 
634 |         try:
635 |             # Get collection fields
636 |             fields = await self.list_fields(collection)
637 | 
638 |             # Find the specified field
639 |             field_info = next((f for f in fields if f.get("name") == field), None)
640 |             if not field_info:
641 |                 raise SchemaError(
642 |                     f"Field '{field}' does not exist in collection '{collection}'"
643 |                 )
644 | 
645 |             # Check if field is a vector type (supporting both dense_vector and knn_vector)
646 |             field_type = field_info.get("type")
647 |             field_class = field_info.get("class")
648 |             if (
649 |                 field_type not in ["dense_vector", "knn_vector"]
650 |                 and field_class != "solr.DenseVectorField"
651 |             ):
652 |                 raise SchemaError(
653 |                     f"Field '{field}' is not a vector field (type: {field_type}, class: {field_class})"
654 |                 )
655 | 
656 |             # Get field dimension
657 |             vector_dimension = None
658 | 
659 |             # First check if dimension is directly in field info
660 |             if "vectorDimension" in field_info:
661 |                 vector_dimension = field_info["vectorDimension"]
662 |             else:
663 |                 # Look up the field type definition
664 |                 field_type_name = field_info.get("type")
665 | 
666 |                 # Get all field types
667 |                 schema_url = f"{self.solr_base_url}/{collection}/schema"
668 |                 try:
669 |                     schema_response = requests.get(schema_url)
670 |                     schema_data = schema_response.json()
671 |                     field_types = schema_data.get("schema", {}).get("fieldTypes", [])
672 | 
673 |                     # Find matching field type
674 |                     matching_type = next(
675 |                         (ft for ft in field_types if ft.get("name") == field_type_name),
676 |                         None,
677 |                     )
678 | 
679 |                     if matching_type and "vectorDimension" in matching_type:
680 |                         vector_dimension = matching_type["vectorDimension"]
681 |                     elif (
682 |                         matching_type
683 |                         and matching_type.get("class") == "solr.DenseVectorField"
684 |                     ):
685 |                         # For solr.DenseVectorField, dimension should be specified in the field type
686 |                         vector_dimension = matching_type.get("vectorDimension")
687 |                 except Exception as e:
688 |                     logger.warning(
689 |                         f"Error fetching schema to determine vector dimension: {str(e)}"
690 |                     )
691 | 
692 |             # If still not found, attempt to get from fields
693 |             if not vector_dimension:
694 |                 # Look for field types in the fields list that match this type
695 |                 field_types = [
696 |                     f
697 |                     for f in fields
698 |                     if f.get("class") == "solr.DenseVectorField"
699 |                     or (f.get("name") == field_type and "vectorDimension" in f)
700 |                 ]
701 |                 if field_types and "vectorDimension" in field_types[0]:
702 |                     vector_dimension = field_types[0]["vectorDimension"]
703 | 
704 |             # No need to use hardcoded defaults - this should be explicitly defined in the schema
705 | 
706 |             if not vector_dimension:
707 |                 raise SchemaError(
708 |                     f"Could not determine vector dimension for field '{field}' (type: {field_type})"
709 |                 )
710 | 
711 |             # If vector provider model and dimensions are provided, check compatibility
712 |             if vector_provider_model and model_dimensions:
713 |                 model_dimension = model_dimensions.get(vector_provider_model)
714 |                 if model_dimension:
715 |                     # Validate dimensions match
716 |                     if int(vector_dimension) != model_dimension:
717 |                         raise SchemaError(
718 |                             f"Vector dimension mismatch: field '{field}' has dimension {vector_dimension}, "
719 |                             f"but model '{vector_provider_model}' produces vectors with dimension {model_dimension}"
720 |                         )
721 | 
722 |             # Cache the result
723 |             self._vector_field_cache[cache_key] = field_info
724 |             return field_info
725 | 
726 |         except SchemaError:
727 |             raise
728 |         except Exception as e:
729 |             raise SchemaError(f"Error validating vector field dimension: {str(e)}")
730 | 
```

--------------------------------------------------------------------------------
/data/bitcoin-whitepaper.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Bitcoin: A Peer-to-Peer Electronic Cash System
  2 | 
  3 | Satoshi Nakamoto  
  4 | [satoshin@gmx.com](mailto:satoshin@gmx.com)  
  5 | www.bitcoin.org
  6 | 
  7 | **Abstract.** A purely peer-to-peer version of electronic cash would allow online payments to be sent directly from one party to another without going through a financial institution. Digital signatures provide part of the solution, but the main benefits are lost if a trusted third party is still required to prevent double-spending. We propose a solution to the double-spending problem using a peer-to-peer network. The network timestamps transactions by hashing them into an ongoing chain of hash-based proof-of-work, forming a record that cannot be changed without redoing the proof-of-work. The longest chain not only serves as proof of the sequence of events witnessed, but proof that it came from the largest pool of CPU power. As long as a majority of CPU power is controlled by nodes that are not cooperating to attack the network, they'll generate the longest chain and outpace attackers. The network itself requires minimal structure. Messages are broadcast on a best effort basis, and nodes can leave and rejoin the network at will, accepting the longest proof-of-work chain as proof of what happened while they were gone.
  8 | 
  9 | ## 1. Introduction
 10 | 
 11 | Commerce on the Internet has come to rely almost exclusively on financial institutions serving as trusted third parties to process electronic payments. While the system works well enough for most transactions, it still suffers from the inherent weaknesses of the trust based model. Completely non-reversible transactions are not really possible, since financial institutions cannot avoid mediating disputes. The cost of mediation increases transaction costs, limiting the minimum practical transaction size and cutting off the possibility for small casual transactions, and there is a broader cost in the loss of ability to make non-reversible payments for non-reversible services. With the possibility of reversal, the need for trust spreads. Merchants must be wary of their customers, hassling them for more information than they would otherwise need. A certain percentage of fraud is accepted as unavoidable. These costs and payment uncertainties can be avoided in person by using physical currency, but no mechanism exists to make payments over a communications channel without a trusted party.
 12 | 
 13 | What is needed is an electronic payment system based on cryptographic proof instead of trust, allowing any two willing parties to transact directly with each other without the need for a trusted third party. Transactions that are computationally impractical to reverse would protect sellers from fraud, and routine escrow mechanisms could easily be implemented to protect buyers. In this paper, we propose a solution to the double-spending problem using a peer-to-peer distributed timestamp server to generate computational proof of the chronological order of transactions. The system is secure as long as honest nodes collectively control more CPU power than any cooperating group of attacker nodes.
 14 | 
 15 | ## 2. Transactions
 16 | 
 17 | We define an electronic coin as a chain of digital signatures. Each owner transfers the coin to the next by digitally signing a hash of the previous transaction and the public key of the next owner and adding these to the end of the coin. A payee can verify the signatures to verify the chain of ownership.
 18 | 
 19 | ```
 20 |       ┌─────────────────────┐               ┌─────────────────────┐              ┌─────────────────────┐
 21 |       │                     │               │                     │              │                     │
 22 |       │    Transaction      │               │    Transaction      │              │    Transaction      │
 23 |       │                     │               │                     │              │                     │
 24 |       │   ┌─────────────┐   │               │   ┌─────────────┐   │              │   ┌─────────────┐   │
 25 |       │   │ Owner 1's   │   │               │   │ Owner 2's   │   │              │   │ Owner 3's   │   │
 26 |       │   │ Public Key  │   │               │   │ Public Key  │   │              │   │ Public Key  │   │
 27 |       │   └───────┬─────┘   │               │   └───────┬─────┘   │              │   └───────┬─────┘   │
 28 |       │           │    .    │               │           │    .    │              │           │         │
 29 | ──────┼─────────┐ │    .    ├───────────────┼─────────┐ │    .    ├──────────────┼─────────┐ │         │
 30 |       │         │ │    .    │               │         │ │    .    │              │         │ │         │
 31 |       │      ┌──▼─▼──┐ .    │               │      ┌──▼─▼──┐ .    │              │      ┌──▼─▼──┐      │
 32 |       │      │ Hash  │ .    │               │      │ Hash  │ .    │              │      │ Hash  │      │
 33 |       │      └───┬───┘ .    │    Verify     │      └───┬───┘ .    │    Verify    │      └───┬───┘      │
 34 |       │          │     ............................    │     ...........................    │          │
 35 |       │          │          │               │     │    │          │              │     │    │          │
 36 |       │   ┌──────▼──────┐   │               │   ┌─▼────▼──────┐   │              │   ┌─▼────▼──────┐   │
 37 |       │   │ Owner 0's   │   │      Sign     │   │ Owner 1's   │   │      Sign    │   │ Owner 2's   │   │
 38 |       │   │ Signature   │   │      ...........─►│ Signature   │   │     ...........─►│ Signature   │   │
 39 |       │   └─────────────┘   │      .        │   └─────────────┘   │     .        │   └─────────────┘   │
 40 |       │                     │      .        │                     │     .        │                     │
 41 |       └─────────────────────┘      .        └─────────────────────┘     .        └─────────────────────┘
 42 |                                    .                                    .
 43 |           ┌─────────────┐          .            ┌─────────────┐         .            ┌─────────────┐
 44 |           │ Owner 1's   │...........            │ Owner 2's   │..........            │ Owner 3's   │
 45 |           │ Private Key │                       │ Private Key │                      │ Private Key │
 46 |           └─────────────┘                       └─────────────┘                      └─────────────┘
 47 | ```
 48 | 
 49 | The problem of course is the payee can't verify that one of the owners did not double-spend the coin. A common solution is to introduce a trusted central authority, or mint, that checks every transaction for double spending. After each transaction, the coin must be returned to the mint to issue a new coin, and only coins issued directly from the mint are trusted not to be double-spent. The problem with this solution is that the fate of the entire money system depends on the company running the mint, with every transaction having to go through them, just like a bank.
 50 | 
 51 | We need a way for the payee to know that the previous owners did not sign any earlier transactions. For our purposes, the earliest transaction is the one that counts, so we don't care about later attempts to double-spend. The only way to confirm the absence of a transaction is to be aware of all transactions. In the mint based model, the mint was aware of all transactions and decided which arrived first. To accomplish this without a trusted party, transactions must be publicly announced [^1], and we need a system for participants to agree on a single history of the order in which they were received. The payee needs proof that at the time of each transaction, the majority of nodes agreed it was the first received.
 52 | 
 53 | ## 3. Timestamp Server
 54 | 
 55 | The solution we propose begins with a timestamp server. A timestamp server works by taking a hash of a block of items to be timestamped and widely publishing the hash, such as in a newspaper or Usenet post [^2] [^3] [^4] [^5]. The timestamp proves that the data must have existed at the time, obviously, in order to get into the hash. Each timestamp includes the previous timestamp in its hash, forming a chain, with each additional timestamp reinforcing the ones before it.
 56 | 
 57 | ```
 58 |              ┌──────┐                        ┌──────┐
 59 | ────────────►│      ├───────────────────────►│      ├───────────────────►
 60 |              │ Hash │                        │ Hash │
 61 |         ┌───►│      │                   ┌───►│      │
 62 |         │    └──────┘                   │    └──────┘
 63 |         │                               │
 64 |        ┌┴──────────────────────────┐   ┌┴──────────────────────────┐
 65 |        │ Block                     │   │ Block                     │
 66 |        │ ┌─────┐ ┌─────┐ ┌─────┐   │   │ ┌─────┐ ┌─────┐ ┌─────┐   │
 67 |        │ │Item │ │Item │ │...  │   │   │ │Item │ │Item │ │...  │   │
 68 |        │ └─────┘ └─────┘ └─────┘   │   │ └─────┘ └─────┘ └─────┘   │
 69 |        │                           │   │                           │
 70 |        └───────────────────────────┘   └───────────────────────────┘
 71 | ```
 72 | 
 73 | ## 4. Proof-of-Work
 74 | 
 75 | To implement a distributed timestamp server on a peer-to-peer basis, we will need to use a proof-of-work system similar to Adam Back's Hashcash [^6], rather than newspaper or Usenet posts. The proof-of-work involves scanning for a value that when hashed, such as with SHA-256, the hash begins with a number of zero bits. The average work required is exponential in the number of zero bits required and can be verified by executing a single hash.
 76 | 
 77 | For our timestamp network, we implement the proof-of-work by incrementing a nonce in the block until a value is found that gives the block's hash the required zero bits. Once the CPU effort has been expended to make it satisfy the proof-of-work, the block cannot be changed without redoing the work. As later blocks are chained after it, the work to change the block would include redoing all the blocks after it.
 78 | 
 79 | ```
 80 |        ┌────────────────────────────────────────┐      ┌────────────────────────────────────────┐
 81 |        │  Block                                 │      │  Block                                 │
 82 |        │  ┌──────────────────┐ ┌──────────────┐ │      │  ┌──────────────────┐ ┌──────────────┐ │
 83 | ───────┼─►│ Prev Hash        │ │ Nonce        │ ├──────┼─►│ Prev Hash        │ │ Nonce        │ │
 84 |        │  └──────────────────┘ └──────────────┘ │      │  └──────────────────┘ └──────────────┘ │
 85 |        │                                        │      │                                        │
 86 |        │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │      │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
 87 |        │ │ Tx       │ │ Tx       │ │ ...      │ │      │ │ Tx       │ │ Tx       │ │ ...      │ │
 88 |        │ └──────────┘ └──────────┘ └──────────┘ │      │ └──────────┘ └──────────┘ └──────────┘ │
 89 |        │                                        │      │                                        │
 90 |        └────────────────────────────────────────┘      └────────────────────────────────────────┘
 91 | ```
 92 | 
 93 | The proof-of-work also solves the problem of determining representation in majority decision making. If the majority were based on one-IP-address-one-vote, it could be subverted by anyone able to allocate many IPs. Proof-of-work is essentially one-CPU-one-vote. The majority decision is represented by the longest chain, which has the greatest proof-of-work effort invested in it. If a majority of CPU power is controlled by honest nodes, the honest chain will grow the fastest and outpace any competing chains. To modify a past block, an attacker would have to redo the proof-of-work of the block and all blocks after it and then catch up with and surpass the work of the honest nodes. We will show later that the probability of a slower attacker catching up diminishes exponentially as subsequent blocks are added.
 94 | 
 95 | To compensate for increasing hardware speed and varying interest in running nodes over time, the proof-of-work difficulty is determined by a moving average targeting an average number of blocks per hour. If they're generated too fast, the difficulty increases.
 96 | 
 97 | ## 5. Network
 98 | 
 99 | The steps to run the network are as follows:
100 | 
101 | 1. New transactions are broadcast to all nodes.
102 | 2. Each node collects new transactions into a block.
103 | 3. Each node works on finding a difficult proof-of-work for its block.
104 | 4. When a node finds a proof-of-work, it broadcasts the block to all nodes.
105 | 5. Nodes accept the block only if all transactions in it are valid and not already spent.
106 | 6. Nodes express their acceptance of the block by working on creating the next block in the chain, using the hash of the accepted block as the previous hash.
107 | 
108 | Nodes always consider the longest chain to be the correct one and will keep working on extending it. If two nodes broadcast different versions of the next block simultaneously, some nodes may receive one or the other first. In that case, they work on the first one they received, but save the other branch in case it becomes longer. The tie will be broken when the next proof-of-work is found and one branch becomes longer; the nodes that were working on the other branch will then switch to the longer one.
109 | 
110 | New transaction broadcasts do not necessarily need to reach all nodes. As long as they reach many nodes, they will get into a block before long. Block broadcasts are also tolerant of dropped messages. If a node does not receive a block, it will request it when it receives the next block and realizes it missed one.
111 | 
112 | ## 6. Incentive
113 | 
114 | By convention, the first transaction in a block is a special transaction that starts a new coin owned by the creator of the block. This adds an incentive for nodes to support the network, and provides a way to initially distribute coins into circulation, since there is no central authority to issue them. The steady addition of a constant amount of new coins is analogous to gold miners expending resources to add gold to circulation. In our case, it is CPU time and electricity that is expended.
115 | 
116 | The incentive can also be funded with transaction fees. If the output value of a transaction is less than its input value, the difference is a transaction fee that is added to the incentive value of the block containing the transaction. Once a predetermined number of coins have entered circulation, the incentive can transition entirely to transaction fees and be completely inflation free.
117 | 
118 | The incentive may help encourage nodes to stay honest. If a greedy attacker is able to assemble more CPU power than all the honest nodes, he would have to choose between using it to defraud people by stealing back his payments, or using it to generate new coins. He ought to find it more profitable to play by the rules, such rules that favour him with more new coins than everyone else combined, than to undermine the system and the validity of his own wealth.
119 | 
120 | ## 7. Reclaiming Disk Space
121 | 
122 | Once the latest transaction in a coin is buried under enough blocks, the spent transactions before it can be discarded to save disk space. To facilitate this without breaking the block's hash, transactions are hashed in a Merkle Tree [^7] [^2] [^5], with only the root included in the block's hash. Old blocks can then be compacted by stubbing off branches of the tree. The interior hashes do not need to be stored.
123 | 
124 | ```
125 | ┌──────────────────────────────────────────┐    ┌──────────────────────────────────────────┐
126 | │                                          │    │                                          │
127 | │ Block ┌─────────────────────────────┐    │    │ Block ┌─────────────────────────────┐    │
128 | │       │  Block Header (Block Hash)  │    │    │       │  Block Header (Block Hash)  │    │
129 | │       │ ┌────────────┐ ┌─────────┐  │    │    │       │ ┌────────────┐ ┌─────────┐  │    │
130 | │       │ │ Prev Hash  │ │ Nonce   │  │    │    │       │ │ Prev Hash  │ │ Nonce   │  │    │
131 | │       │ └────────────┘ └─────────┘  │    │    │       │ └────────────┘ └─────────┘  │    │
132 | │       │                             │    │    │       │                             │    │
133 | │       │     ┌─────────────┐         │    │    │       │     ┌─────────────┐         │    │
134 | │       │     │  Root Hash  │         │    │    │       │     │  Root Hash  │         │    │
135 | │       │     └─────▲─▲─────┘         │    │    │       │     └─────▲─▲─────┘         │    │
136 | │       │           │ │               │    │    │       │           │ │               │    │
137 | │       │           │ │               │    │    │       │           │ │               │    │
138 | │       └───────────┼─┼───────────────┘    │    │       └───────────┼─┼───────────────┘    │
139 | │                   │ │                    │    │                   │ │                    │
140 | │     ..........    │ │     ..........     │    │     ┌────────┐    │ │     ..........     │
141 | │     .        ─────┘ └─────.        .     │    │     │        ├────┘ └─────.        .     │
142 | │     . Hash01 .            . Hash23 .     │    │     │ Hash01 │            . Hash23 .     │
143 | │     .▲.....▲..            .▲.....▲..     │    │     │        │            .▲.....▲..     │
144 | │      │     │               │     │       │    │     └────────┘             │     │       │
145 | │      │     │               │     │       │    │                            │     │       │
146 | │      │     │               │     │       │    │                            │     │       │
147 | │ .....│.. ..│.....     .....│.. ..│.....  │    │                       ┌────┴─┐ ..│.....  │
148 | │ .      . .      .     .      . .      .  │    │                       │      │ .      .  │
149 | │ .Hash0 . .Hash1 .     .Hash2 . .Hash3 .  │    │                       │Hash2 │ .Hash3 .  │
150 | │ ...▲.... ...▲....     ...▲.... ...▲....  │    │                       │      │ .      .  │
151 | │    │        │            │        │      │    │                       └──────┘ ...▲....  │
152 | │    │        │            │        │      │    │                                   │      │
153 | │    │        │            │        │      │    │                                   │      │
154 | │ ┌──┴───┐ ┌──┴───┐     ┌──┴───┐ ┌──┴───┐  │    │                                ┌──┴───┐  │
155 | │ │ Tx0  │ │ Tx1  │     │ Tx2  │ │ Tx3  │  │    │                                │ Tx3  │  │
156 | │ └──────┘ └──────┘     └──────┘ └──────┘  │    │                                └──────┘  │
157 | │                                          │    │                                          │
158 | └──────────────────────────────────────────┘    └──────────────────────────────────────────┘
159 |      Transactions Hashed in a Merkle Tree              After Pruning Tx0-2 from the Block
160 | ```
161 | 
162 | A block header with no transactions would be about 80 bytes. If we suppose blocks are generated every 10 minutes, 80 bytes * 6 * 24 * 365 = 4.2MB per year. With computer systems typically selling with 2GB of RAM as of 2008, and Moore's Law predicting current growth of 1.2GB per year, storage should not be a problem even if the block headers must be kept in memory.
163 | 
164 | ## 8. Simplified Payment Verification
165 | 
166 | It is possible to verify payments without running a full network node. A user only needs to keep a copy of the block headers of the longest proof-of-work chain, which he can get by querying network nodes until he's convinced he has the longest chain, and obtain the Merkle branch linking the transaction to the block it's timestamped in. He can't check the transaction for himself, but by linking it to a place in the chain, he can see that a network node has accepted it, and blocks added after it further confirm the network has accepted it.
167 | 
168 | ```
169 |      Longest Proof-of-Work Chain
170 |         ┌────────────────────────────────────────┐      ┌────────────────────────────────────────┐       ┌────────────────────────────────────────┐
171 |         │   Block Header                         │      │   Block Header                         │       │   Block Header                         │
172 |         │  ┌──────────────────┐ ┌──────────────┐ │      │  ┌──────────────────┐ ┌──────────────┐ │       │  ┌──────────────────┐ ┌──────────────┐ │
173 |  ───────┼─►│ Prev Hash        │ │ Nonce        │ ├──────┼─►│ Prev Hash        │ │ Nonce        │ ├───────┼─►│ Prev Hash        │ │ Nonce        │ ├────────►
174 |         │  └──────────────────┘ └──────────────┘ │      │  └──────────────────┘ └──────────────┘ │       │  └──────────────────┘ └──────────────┘ │
175 |         │                                        │      │                                        │       │                                        │
176 |         │     ┌───────────────────┐              │      │    ┌────────────────────┐              │       │     ┌───────────────────┐              │
177 |         │     │   Merkle Root     │              │      │    │   Merkle Root      │              │       │     │   Merkle Root     │              │
178 |         │     └───────────────────┘              │      │    └────────▲─▲─────────┘              │       │     └───────────────────┘              │
179 |         │                                        │      │             │ │                        │       │                                        │
180 |         └────────────────────────────────────────┘      └─────────────┼─┼────────────────────────┘       └────────────────────────────────────────┘
181 |                                                                       │ │
182 |                                                                       │ │
183 |                                                         ┌────────┐    │ │     ..........
184 |                                                         │        ├────┘ └─────.        .
185 |                                                         │ Hash01 │            . Hash23 .
186 |                                                         │        │            .▲.....▲..
187 |                                                         └────────┘             │     │
188 |                                                                                │     │
189 |                                                                                │     │   Merkle Branch for Tx3
190 |                                                                                │     │
191 |                                                                          ┌─────┴─┐ ..│.....
192 |                                                                          │       │ .      .
193 |                                                                          │ Hash2 │ .Hash3 .
194 |                                                                          │       │ .      .
195 |                                                                          └───────┘ ...▲....
196 |                                                                                       │
197 |                                                                                       │
198 |                                                                                   ┌───┴───┐
199 |                                                                                   │  Tx3  │
200 |                                                                                   └───────┘
201 | ```
202 | 
203 | As such, the verification is reliable as long as honest nodes control the network, but is more vulnerable if the network is overpowered by an attacker. While network nodes can verify transactions for themselves, the simplified method can be fooled by an attacker's fabricated transactions for as long as the attacker can continue to overpower the network. One strategy to protect against this would be to accept alerts from network nodes when they detect an invalid block, prompting the user's software to download the full block and alerted transactions to confirm the inconsistency. Businesses that receive frequent payments will probably still want to run their own nodes for more independent security and quicker verification.
204 | 
205 | ## 9. Combining and Splitting Value
206 | 
207 | Although it would be possible to handle coins individually, it would be unwieldy to make a separate transaction for every cent in a transfer. To allow value to be split and combined, transactions contain multiple inputs and outputs. Normally there will be either a single input from a larger previous transaction or multiple inputs combining smaller amounts, and at most two outputs: one for the payment, and one returning the change, if any, back to the sender.
208 | 
209 | ```
210 |      ┌──────────────────────┐
211 |      │ Transaction          │
212 |      │                      │
213 |      │   ┌─────┐  ┌─────┐   │
214 | ─────┼──►│ in  │  │ out │ ──┼─────►
215 |      │   └─────┘  └─────┘   │
216 |      │                      │
217 |      │                      │
218 |      │   ┌─────┐  ┌─────┐   │
219 | ─────┼──►│ in  │  │ ... │ ──┼─────►
220 |      │   └─────┘  └─────┘   │
221 |      │                      │
222 |      │                      │
223 |      │   ┌─────┐            │
224 | ─────┼──►│...  │            │
225 |      │   └─────┘            │
226 |      │                      │
227 |      └──────────────────────┘
228 | ```
229 | It should be noted that fan-out, where a transaction depends on several transactions, and those transactions depend on many more, is not a problem here. There is never the need to extract a complete standalone copy of a transaction's history.
230 | 
231 | ## 10. Privacy
232 | 
233 | The traditional banking model achieves a level of privacy by limiting access to information to the parties involved and the trusted third party. The necessity to announce all transactions publicly precludes this method, but privacy can still be maintained by breaking the flow of information in another place: by keeping public keys anonymous. The public can see that someone is sending an amount to someone else, but without information linking the transaction to anyone. This is similar to the level of information released by stock exchanges, where the time and size of individual trades, the "tape", is made public, but without telling who the parties were.
234 | 
235 | ```
236 | Traditional Privacy Models                                                │
237 |                                       ┌─────────────┐   ┌──────────────┐  │  ┌────────┐
238 | ┌──────────────┐  ┌──────────────┐    │  Trusted    │   │              │  │  │        │
239 | │  Identities  ├──┤ Transactions ├───►│ Third Party ├──►│ Counterparty │  │  │ Public │
240 | └──────────────┘  └──────────────┘    │             │   │              │  │  │        │
241 |                                       └─────────────┘   └──────────────┘  │  └────────┘
242 |                                                                           │
243 | 
244 | New Privacy Model
245 |                                        ┌────────┐
246 | ┌──────────────┐ │ ┌──────────────┐    │        │
247 | │  Identities  │ │ │ Transactions ├───►│ Public │
248 | └──────────────┘ │ └──────────────┘    │        │
249 |                                        └────────┘
250 | ```
251 | As an additional firewall, a new key pair should be used for each transaction to keep them from being linked to a common owner. Some linking is still unavoidable with multi-input transactions, which necessarily reveal that their inputs were owned by the same owner. The risk is that if the owner of a key is revealed, linking could reveal other transactions that belonged to the same owner.
252 | 
253 | ## 11. Calculations
254 | We consider the scenario of an attacker trying to generate an alternate chain faster than the honest chain. Even if this is accomplished, it does not throw the system open to arbitrary changes, such as creating value out of thin air or taking money that never belonged to the attacker. Nodes are not going to accept an invalid transaction as payment, and honest nodes will never accept a block containing them. An attacker can only try to change one of his own transactions to take back money he recently spent.
255 | 
256 | The race between the honest chain and an attacker chain can be characterized as a Binomial Random Walk. The success event is the honest chain being extended by one block, increasing its lead by +1, and the failure event is the attacker's chain being extended by one block, reducing the gap by -1.
257 | 
258 | The probability of an attacker catching up from a given deficit is analogous to a Gambler's Ruin problem. Suppose a gambler with unlimited credit starts at a deficit and plays potentially an infinite number of trials to try to reach breakeven. We can calculate the probability he ever reaches breakeven, or that an attacker ever catches up with the honest chain, as follows [^8]:
259 | 
```plaintext
p = probability an honest node finds the next block
q = probability the attacker finds the next block
qz = probability the attacker will ever catch up from z blocks behind
```
265 |      
266 | $$
267 | qz = 
268 | \begin{cases} 
269 | 1 & \text{if } p \leq q \\
270 | \left(\frac{q}{p}\right) z & \text{if } p > q 
271 | \end{cases}
272 | $$
273 | 
274 | Given our assumption that p > q, the probability drops exponentially as the number of blocks the attacker has to catch up with increases. With the odds against him, if he doesn't make a lucky lunge forward early on, his chances become vanishingly small as he falls further behind. 
275 | 
276 | We now consider how long the recipient of a new transaction needs to wait before being sufficiently certain the sender can't change the transaction. We assume the sender is an attacker who wants to make the recipient believe he paid him for a while, then switch it to pay back to himself after some time has passed. The receiver will be alerted when that happens, but the sender hopes it will be too late.
277 | 
278 | The receiver generates a new key pair and gives the public key to the sender shortly before signing. This prevents the sender from preparing a chain of blocks ahead of time by working on it continuously until he is lucky enough to get far enough ahead, then executing the transaction at that moment. Once the transaction is sent, the dishonest sender starts working in secret on a parallel chain containing an alternate version of his transaction.
279 | 
280 | The recipient waits until the transaction has been added to a block and z blocks have been linked after it. He doesn't know the exact amount of progress the attacker has made, but assuming the honest blocks took the average expected time per block, the attacker's potential progress will be a Poisson distribution with expected value:
281 | 
282 | $$
283 | \lambda = z\frac{q}{p}
284 | $$
285 | 
286 | To get the probability the attacker could still catch up now, we multiply the Poisson density for each amount of progress he could have made by the probability he could catch up from that point:
287 | 
288 | $$
289 | \sum_{k=0}^{\infty} \frac{\lambda^k e^{-\lambda}}{k!} \cdot \left\{ 
290 | \begin{array}{cl} 
291 | \left(\frac{q}{p}\right)^{(z-k)} & \text{if } k \leq z \\
292 | 1 & \text{if } k > z 
293 | \end{array}
294 | \right.
295 | $$
296 | 
297 | Rearranging to avoid summing the infinite tail of the distribution...
298 | 
299 | $$
300 | 1 - \sum_{k=0}^{z} \frac{\lambda^k e^{-\lambda}}{k!} \left(1-\left(\frac{q}{p}\right)^{(z-k)}\right)
301 | $$
302 | 
303 | Converting to C code...
304 | 
305 | ```c
306 | #include <math.h>
307 | 
double AttackerSuccessProbability(double q, int z)
{
    /* Probability that an attacker controlling a fraction q of the hash
     * power ever catches up from z blocks behind (whitepaper section 11).
     *
     * q: attacker's chance of finding the next block (0 < q < 0.5 assumed).
     * z: number of blocks the attacker trails the honest chain by.
     * Returns: the attacker's overall success probability, in [0, 1].
     */
    double p = 1.0 - q;           /* honest nodes' chance per block */
    double lambda = z * (q / p);  /* expected attacker progress (Poisson mean) */
    double sum = 1.0;
    int i, k;
    for (k = 0; k <= z; k++)
    {
        /* Poisson density lambda^k * e^(-lambda) / k!, built up
         * incrementally to avoid computing k! (and overflowing) directly. */
        double poisson = exp(-lambda);
        for (i = 1; i <= k; i++)
            poisson *= lambda / i;
        /* Subtract P(attacker made k blocks of progress) times
         * P(he then fails to catch up from z-k behind) = 1 - (q/p)^(z-k).
         * Summing k = 0..z this way avoids the infinite tail, since for
         * k > z the catch-up probability is 1 and contributes nothing. */
        sum -= poisson * (1 - pow(q / p, z - k));
    }
    return sum;
}
323 | ```
324 | Running some results, we can see the probability drop off exponentially with z.
325 | 
326 | ```plaintext
327 | q=0.1
328 | z=0 P=1.0000000
329 | z=1 P=0.2045873
330 | z=2 P=0.0509779
331 | z=3 P=0.0131722
332 | z=4 P=0.0034552
333 | z=5 P=0.0009137
334 | z=6 P=0.0002428
335 | z=7 P=0.0000647
336 | z=8 P=0.0000173
337 | z=9 P=0.0000046
338 | z=10 P=0.0000012
339 | 
340 | q=0.3
341 | z=0 P=1.0000000
342 | z=5 P=0.1773523
343 | z=10 P=0.0416605
344 | z=15 P=0.0101008
345 | z=20 P=0.0024804
346 | z=25 P=0.0006132
347 | z=30 P=0.0001522
348 | z=35 P=0.0000379
349 | z=40 P=0.0000095
350 | z=45 P=0.0000024
351 | z=50 P=0.0000006
352 | ```
353 | Solving for P less than 0.1%...
354 | ```plaintext
355 | P < 0.001
356 | q=0.10 z=5
357 | q=0.15 z=8
358 | q=0.20 z=11
359 | q=0.25 z=15
360 | q=0.30 z=24
361 | q=0.35 z=41
362 | q=0.40 z=89
363 | q=0.45 z=340
364 | ```
365 | ## 12. Conclusion
366 | We have proposed a system for electronic transactions without relying on trust. We started with the usual framework of coins made from digital signatures, which provides strong control of ownership, but is incomplete without a way to prevent double-spending. To solve this, we proposed a peer-to-peer network using proof-of-work to record a public history of transactions that quickly becomes computationally impractical for an attacker to change if honest nodes control a majority of CPU power. The network is robust in its unstructured simplicity. Nodes work all at once with little coordination. They do not need to be identified, since messages are not routed to any particular place and only need to be delivered on a best effort basis. Nodes can leave and rejoin the network at will, accepting the proof-of-work chain as proof of what happened while they were gone. They vote with their CPU power, expressing their acceptance of valid blocks by working on extending them and rejecting invalid blocks by refusing to work on them. Any needed rules and incentives can be enforced with this consensus mechanism.
367 | <br>
368 | 
369 | ### References
370 | ---
371 | [^1]: W. Dai, "b-money," http://www.weidai.com/bmoney.txt, 1998.
372 | [^2]: H. Massias, X.S. Avila, and J.-J. Quisquater, "Design of a secure timestamping service with minimal
373 | trust requirements," In 20th Symposium on Information Theory in the Benelux, May 1999.
374 | [^3]: S. Haber, W.S. Stornetta, "How to time-stamp a digital document," In Journal of Cryptology, vol 3, no
375 | 2, pages 99-111, 1991.
376 | [^4]: D. Bayer, S. Haber, W.S. Stornetta, "Improving the efficiency and reliability of digital time-stamping,"
377 | In Sequences II: Methods in Communication, Security and Computer Science, pages 329-334, 1993.
378 | [^5]: S. Haber, W.S. Stornetta, "Secure names for bit-strings," In Proceedings of the 4th ACM Conference
379 | on Computer and Communications Security, pages 28-35, April 1997.
380 | [^6]: A. Back, "Hashcash - a denial of service counter-measure,"
381 | http://www.hashcash.org/papers/hashcash.pdf, 2002.
382 | [^7]: R.C. Merkle, "Protocols for public key cryptosystems," In Proc. 1980 Symposium on Security and
383 | Privacy, IEEE Computer Society, pages 122-133, April 1980.
384 | [^8]: W. Feller, "An introduction to probability theory and its applications," 1957.
385 | 
```

--------------------------------------------------------------------------------
/data/bitcoin-whitepaper.json:
--------------------------------------------------------------------------------

```json
  1 | [
  2 |   {
  3 |     "id": "bitcoin-whitepaper.md_section_0",
  4 |     "title": "Bitcoin: A Peer-to-Peer Electronic Cash System",
  5 |     "text": "Satoshi Nakamoto  \n[[email protected]](mailto:[email protected])  \nwww.bitcoin.org\n\n**Abstract.** A purely peer-to-peer version of electronic cash would allow online payments to be sent directly from one party to another without going through a financial institution. Digital signatures provide part of the solution, but the main benefits are lost if a trusted third party is still required to prevent double-spending. We propose a solution to the double-spending problem using a peer-to-peer network. The network timestamps transactions by hashing them into an ongoing chain of hash-based proof-of-work, forming a record that cannot be changed without redoing the proof-of-work. The longest chain not only serves as proof of the sequence of events witnessed, but proof that it came from the largest pool of CPU power. As long as a majority of CPU power is controlled by nodes that are not cooperating to attack the network, they'll generate the longest chain and outpace attackers. The network itself requires minimal structure. Messages are broadcast on a best effort basis, and nodes can leave and rejoin the network at will, accepting the longest proof-of-work chain as proof of what happened while they were gone.",
  6 |     "source": "data/bitcoin-whitepaper.md",
  7 |     "section_number": 0,
  8 |     "date_indexed": "2025-03-20T15:28:42.816825",
  9 |     "tags": [],
 10 |     "category": []
 11 |   },
 12 |   {
 13 |     "id": "bitcoin-whitepaper.md_section_1",
 14 |     "title": "1. Introduction",
 15 |     "text": "Commerce on the Internet has come to rely almost exclusively on financial institutions serving as trusted third parties to process electronic payments. While the system works well enough for most transactions, it still suffers from the inherent weaknesses of the trust based model. Completely non-reversible transactions are not really possible, since financial institutions cannot avoid mediating disputes. The cost of mediation increases transaction costs, limiting the minimum practical transaction size and cutting off the possibility for small casual transactions, and there is a broader cost in the loss of ability to make non-reversible payments for non-reversible services. With the possibility of reversal, the need for trust spreads. Merchants must be wary of their customers, hassling them for more information than they would otherwise need. A certain percentage of fraud is accepted as unavoidable. These costs and payment uncertainties can be avoided in person by using physical currency, but no mechanism exists to make payments over a communications channel without a trusted party.\n\nWhat is needed is an electronic payment system based on cryptographic proof instead of trust, allowing any two willing parties to transact directly with each other without the need for a trusted third party. Transactions that are computationally impractical to reverse would protect sellers from fraud, and routine escrow mechanisms could easily be implemented to protect buyers. In this paper, we propose a solution to the double-spending problem using a peer-to-peer distributed timestamp server to generate computational proof of the chronological order of transactions. The system is secure as long as honest nodes collectively control more CPU power than any cooperating group of attacker nodes.",
 16 |     "source": "data/bitcoin-whitepaper.md",
 17 |     "section_number": 1,
 18 |     "date_indexed": "2025-03-20T15:28:42.816838",
 19 |     "tags": [],
 20 |     "category": []
 21 |   },
 22 |   {
 23 |     "id": "bitcoin-whitepaper.md_section_2",
 24 |     "title": "2. Transactions",
 25 |     "text": "We define an electronic coin as a chain of digital signatures. Each owner transfers the coin to the next by digitally signing a hash of the previous transaction and the public key of the next owner and adding these to the end of the coin. A payee can verify the signatures to verify the chain of ownership.\n\n```\n      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510               \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510              \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n      \u2502                     \u2502               \u2502                     \u2502              \u2502                     \u2502\n      \u2502    Transaction      \u2502               \u2502    Transaction      \u2502              \u2502    Transaction      \u2502\n      \u2502                     \u2502               \u2502                     \u2502              \u2502                     \u2502\n      \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502               \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502              \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502\n      \u2502   \u2502 Owner 1's   \u2502   \u2502               \u2502   \u2502 Owner 2's   \u2502   \u2502              \u2502   \u2502 Owner 3's   \u2502   \u2502\n      \u2502   \u2502 Public Key  \u2502   \u2502               \u2502   \u2502 Public Key  \u2502   \u2502              \u2502   \u2502 Public Key  \u2502   \u2502\n      \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518   
\u2502               \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518   \u2502              \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518   \u2502\n      \u2502           \u2502    .    \u2502               \u2502           \u2502    .    \u2502              \u2502           \u2502         \u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502    .    \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502    .    \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502         \u2502\n      \u2502         \u2502 \u2502    .    \u2502               \u2502         \u2502 \u2502    .    \u2502              \u2502         \u2502 \u2502         \u2502\n      \u2502      \u250c\u2500\u2500\u25bc\u2500\u25bc\u2500\u2500\u2510 .    \u2502               \u2502      \u250c\u2500\u2500\u25bc\u2500\u25bc\u2500\u2500\u2510 .    \u2502              \u2502      \u250c\u2500\u2500\u25bc\u2500\u25bc\u2500\u2500\u2510      \u2502\n      \u2502      \u2502 Hash  \u2502 .    \u2502               \u2502      \u2502 Hash  \u2502 .    \u2502              \u2502      \u2502 Hash  \u2502      \u2502\n      \u2502      \u2514\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2518 .    \u2502    Verify     \u2502      \u2514\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2518 .    \u2502    Verify    \u2502      \u2514\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2518      \u2502\n      \u2502          \u2502     ............................    \u2502     ...........................    
\u2502          \u2502\n      \u2502          \u2502          \u2502               \u2502     \u2502    \u2502          \u2502              \u2502     \u2502    \u2502          \u2502\n      \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u25bc\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502               \u2502   \u250c\u2500\u25bc\u2500\u2500\u2500\u2500\u25bc\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502              \u2502   \u250c\u2500\u25bc\u2500\u2500\u2500\u2500\u25bc\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502\n      \u2502   \u2502 Owner 0's   \u2502   \u2502      Sign     \u2502   \u2502 Owner 1's   \u2502   \u2502      Sign    \u2502   \u2502 Owner 2's   \u2502   \u2502\n      \u2502   \u2502 Signature   \u2502   \u2502      ...........\u2500\u25ba\u2502 Signature   \u2502   \u2502     ...........\u2500\u25ba\u2502 Signature   \u2502   \u2502\n      \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   \u2502      .        \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   \u2502     .        \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   \u2502\n      \u2502                     \u2502      .        \u2502                     \u2502     .        \u2502                     \u2502\n      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      .        \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518     .        \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                   .                                    
.\n          \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510          .            \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510         .            \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n          \u2502 Owner 1's   \u2502...........            \u2502 Owner 2's   \u2502..........            \u2502 Owner 3's   \u2502\n          \u2502 Private Key \u2502                       \u2502 Private Key \u2502                      \u2502 Private Key \u2502\n          \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518                       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518                      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\n\nThe problem of course is the payee can't verify that one of the owners did not double-spend the coin. A common solution is to introduce a trusted central authority, or mint, that checks every transaction for double spending. After each transaction, the coin must be returned to the mint to issue a new coin, and only coins issued directly from the mint are trusted not to be double-spent. The problem with this solution is that the fate of the entire money system depends on the company running the mint, with every transaction having to go through them, just like a bank.\n\nWe need a way for the payee to know that the previous owners did not sign any earlier transactions. For our purposes, the earliest transaction is the one that counts, so we don't care about later attempts to double-spend. The only way to confirm the absence of a transaction is to be aware of all transactions. In the mint based model, the mint was aware of all transactions and decided which arrived first. 
To accomplish this without a trusted party, transactions must be publicly announced [^1], and we need a system for participants to agree on a single history of the order in which they were received. The payee needs proof that at the time of each transaction, the majority of nodes agreed it was the first received.",
 26 |     "source": "data/bitcoin-whitepaper.md",
 27 |     "section_number": 2,
 28 |     "date_indexed": "2025-03-20T15:28:42.816840",
 29 |     "tags": [],
 30 |     "category": []
 31 |   },
 32 |   {
 33 |     "id": "bitcoin-whitepaper.md_section_3",
 34 |     "title": "3. Timestamp Server",
 35 |     "text": "The solution we propose begins with a timestamp server. A timestamp server works by taking a hash of a block of items to be timestamped and widely publishing the hash, such as in a newspaper or Usenet post [^2] [^3] [^4] [^5]. The timestamp proves that the data must have existed at the time, obviously, in order to get into the hash. Each timestamp includes the previous timestamp in its hash, forming a chain, with each additional timestamp reinforcing the ones before it.\n\n```\n             \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2510                        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25ba\u2502      \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25ba\u2502      \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25ba\n             \u2502 Hash \u2502                        \u2502 Hash \u2502\n        \u250c\u2500\u2500\u2500\u25ba\u2502      \u2502                   \u250c\u2500\u2500\u2500\u25ba\u2502      \u2502\n        \u2502    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518                   \u2502    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n        \u2502                               \u2502\n       \u250c\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u250c\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n       \u2502 Block                     \u2502   \u2502 Block                     \u2502\n       \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2510 
\u250c\u2500\u2500\u2500\u2500\u2500\u2510   \u2502   \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2510   \u2502\n       \u2502 \u2502Item \u2502 \u2502Item \u2502 \u2502...  \u2502   \u2502   \u2502 \u2502Item \u2502 \u2502Item \u2502 \u2502...  \u2502   \u2502\n       \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2518   \u2502   \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2518   \u2502\n       \u2502                           \u2502   \u2502                           \u2502\n       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```",
 36 |     "source": "data/bitcoin-whitepaper.md",
 37 |     "section_number": 3,
 38 |     "date_indexed": "2025-03-20T15:28:42.816841",
 39 |     "tags": [],
 40 |     "category": []
 41 |   },
 42 |   {
 43 |     "id": "bitcoin-whitepaper.md_section_4",
 44 |     "title": "4. Proof-of-Work",
 45 |     "text": "To implement a distributed timestamp server on a peer-to-peer basis, we will need to use a proof-of-work system similar to Adam Back's Hashcash [^6], rather than newspaper or Usenet posts. The proof-of-work involves scanning for a value that when hashed, such as with SHA-256, the hash begins with a number of zero bits. The average work required is exponential in the number of zero bits required and can be verified by executing a single hash.\n\nFor our timestamp network, we implement the proof-of-work by incrementing a nonce in the block until a value is found that gives the block's hash the required zero bits. Once the CPU effort has been expended to make it satisfy the proof-of-work, the block cannot be changed without redoing the work. As later blocks are chained after it, the work to change the block would include redoing all the blocks after it.\n\n```\n       \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n       \u2502  Block                                 \u2502      \u2502  Block                                 \u2502\n       \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502      \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 
\u2502\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u25ba\u2502 Prev Hash        \u2502 \u2502 Nonce        \u2502 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u25ba\u2502 Prev Hash        \u2502 \u2502 Nonce        \u2502 \u2502\n       \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502      \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n       \u2502                                        \u2502      \u2502                                        \u2502\n       \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502      \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n       \u2502 \u2502 Tx       \u2502 \u2502 Tx       \u2502 \u2502 ...      \u2502 \u2502      \u2502 \u2502 Tx       \u2502 \u2502 Tx       \u2502 \u2502 ...      
\u2502 \u2502\n       \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502      \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n       \u2502                                        \u2502      \u2502                                        \u2502\n       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\n\nThe proof-of-work also solves the problem of determining representation in majority decision making. If the majority were based on one-IP-address-one-vote, it could be subverted by anyone able to allocate many IPs. Proof-of-work is essentially one-CPU-one-vote. The majority decision is represented by the longest chain, which has the greatest proof-of-work effort invested in it. If a majority of CPU power is controlled by honest nodes, the honest chain will grow the fastest and outpace any competing chains. To modify a past block, an attacker would have to redo the proof-of-work of the block and all blocks after it and then catch up with and surpass the work of the honest nodes. 
We will show later that the probability of a slower attacker catching up diminishes exponentially as subsequent blocks are added.\n\nTo compensate for increasing hardware speed and varying interest in running nodes over time, the proof-of-work difficulty is determined by a moving average targeting an average number of blocks per hour. If they're generated too fast, the difficulty increases.",
 46 |     "source": "data/bitcoin-whitepaper.md",
 47 |     "section_number": 4,
 48 |     "date_indexed": "2025-03-20T15:28:42.816842",
 49 |     "tags": [],
 50 |     "category": []
 51 |   },
 52 |   {
 53 |     "id": "bitcoin-whitepaper.md_section_5",
 54 |     "title": "5. Network",
 55 |     "text": "The steps to run the network are as follows:\n\n1. New transactions are broadcast to all nodes.\n2. Each node collects new transactions into a block.\n3. Each node works on finding a difficult proof-of-work for its block.\n4. When a node finds a proof-of-work, it broadcasts the block to all nodes.\n5. Nodes accept the block only if all transactions in it are valid and not already spent.\n6. Nodes express their acceptance of the block by working on creating the next block in the chain, using the hash of the accepted block as the previous hash.\n\nNodes always consider the longest chain to be the correct one and will keep working on extending it. If two nodes broadcast different versions of the next block simultaneously, some nodes may receive one or the other first. In that case, they work on the first one they received, but save the other branch in case it becomes longer. The tie will be broken when the next proof-of-work is found and one branch becomes longer; the nodes that were working on the other branch will then switch to the longer one.\n\nNew transaction broadcasts do not necessarily need to reach all nodes. As long as they reach many nodes, they will get into a block before long. Block broadcasts are also tolerant of dropped messages. If a node does not receive a block, it will request it when it receives the next block and realizes it missed one.",
 56 |     "source": "data/bitcoin-whitepaper.md",
 57 |     "section_number": 5,
 58 |     "date_indexed": "2025-03-20T15:28:42.816844",
 59 |     "tags": [],
 60 |     "category": []
 61 |   },
 62 |   {
 63 |     "id": "bitcoin-whitepaper.md_section_6",
 64 |     "title": "6. Incentive",
 65 |     "text": "By convention, the first transaction in a block is a special transaction that starts a new coin owned by the creator of the block. This adds an incentive for nodes to support the network, and provides a way to initially distribute coins into circulation, since there is no central authority to issue them. The steady addition of a constant of amount of new coins is analogous to gold miners expending resources to add gold to circulation. In our case, it is CPU time and electricity that is expended.\n\nThe incentive can also be funded with transaction fees. If the output value of a transaction is less than its input value, the difference is a transaction fee that is added to the incentive value of the block containing the transaction. Once a predetermined number of coins have entered circulation, the incentive can transition entirely to transaction fees and be completely inflation free.\n\nThe incentive may help encourage nodes to stay honest. If a greedy attacker is able to assemble more CPU power than all the honest nodes, he would have to choose between using it to defraud people by stealing back his payments, or using it to generate new coins. He ought to find it more profitable to play by the rules, such rules that favour him with more new coins than everyone else combined, than to undermine the system and the validity of his own wealth.",
 66 |     "source": "data/bitcoin-whitepaper.md",
 67 |     "section_number": 6,
 68 |     "date_indexed": "2025-03-20T15:28:42.816845",
 69 |     "tags": [],
 70 |     "category": []
 71 |   },
 72 |   {
 73 |     "id": "bitcoin-whitepaper.md_section_7",
 74 |     "title": "7. Reclaiming Disk Space",
 75 |     "text": "Once the latest transaction in a coin is buried under enough blocks, the spent transactions before it can be discarded to save disk space. To facilitate this without breaking the block's hash, transactions are hashed in a Merkle Tree [^7] [^2] [^5], with only the root included in the block's hash. Old blocks can then be compacted by stubbing off branches of the tree. The interior hashes do not need to be stored.\n\n```\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502                                          \u2502    \u2502                                          \u2502\n\u2502 Block \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502    \u2502 Block \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502\n\u2502       \u2502  Block Header (Block Hash)  \u2502    \u2502    \u2502       \u2502  Block Header (Block Hash)  \u2502    \u2502\n\u2502       \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510  \u2502    \u2502    \u2502       \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510  \u2502    \u2502\n\u2502       \u2502 \u2502 Prev Hash  \u2502 \u2502 Nonce   \u2502  \u2502    \u2502    \u2502       \u2502 \u2502 Prev Hash  \u2502 \u2502 Nonce   \u2502  \u2502    \u2502\n\u2502       \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518  \u2502    \u2502    \u2502       \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518  \u2502    \u2502\n\u2502       \u2502                             \u2502    \u2502    \u2502       \u2502                             \u2502    \u2502\n\u2502       \u2502     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502    \u2502    \u2502       \u2502     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510         \u2502    \u2502\n\u2502       \u2502     \u2502  Root Hash  \u2502         \u2502    \u2502    \u2502       \u2502     \u2502  Root Hash  \u2502         \u2502    \u2502\n\u2502       \u2502     \u2514\u2500\u2500\u2500\u2500\u2500\u25b2\u2500\u25b2\u2500\u2500\u2500\u2500\u2500\u2518         \u2502    \u2502    \u2502       \u2502     \u2514\u2500\u2500\u2500\u2500\u2500\u25b2\u2500\u25b2\u2500\u2500\u2500\u2500\u2500\u2518         \u2502    \u2502\n\u2502       \u2502           \u2502 \u2502               \u2502    \u2502    \u2502       \u2502           \u2502 \u2502               \u2502    \u2502\n\u2502       \u2502           \u2502 \u2502               \u2502    \u2502    \u2502       \u2502           \u2502 \u2502               \u2502    \u2502\n\u2502       
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502    \u2502       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502\n\u2502                   \u2502 \u2502                    \u2502    \u2502                   \u2502 \u2502                    \u2502\n\u2502     ..........    \u2502 \u2502     ..........     \u2502    \u2502     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502 \u2502     ..........     \u2502\n\u2502     .        \u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500.        .     \u2502    \u2502     \u2502        \u251c\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500.        .     \u2502\n\u2502     . Hash01 .            . Hash23 .     \u2502    \u2502     \u2502 Hash01 \u2502            . Hash23 .     \u2502\n\u2502     .\u25b2.....\u25b2..            .\u25b2.....\u25b2..     \u2502    \u2502     \u2502        \u2502            .\u25b2.....\u25b2..     \u2502\n\u2502      \u2502     \u2502               \u2502     \u2502       \u2502    \u2502     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518             \u2502     \u2502       \u2502\n\u2502      \u2502     \u2502               \u2502     \u2502       \u2502    \u2502                            \u2502     \u2502       \u2502\n\u2502      \u2502     \u2502               \u2502     \u2502       \u2502    \u2502                            \u2502     \u2502       \u2502\n\u2502 .....\u2502.. ..\u2502.....     .....\u2502.. ..\u2502.....  \u2502    \u2502                       \u250c\u2500\u2500\u2500\u2500\u2534\u2500\u2510 ..\u2502.....  \u2502\n\u2502 .      . .      .     .      . .      .  
\u2502    \u2502                       \u2502      \u2502 .      .  \u2502\n\u2502 .Hash0 . .Hash1 .     .Hash2 . .Hash3 .  \u2502    \u2502                       \u2502Hash2 \u2502 .Hash3 .  \u2502\n\u2502 ...\u25b2.... ...\u25b2....     ...\u25b2.... ...\u25b2....  \u2502    \u2502                       \u2502      \u2502 .      .  \u2502\n\u2502    \u2502        \u2502            \u2502        \u2502      \u2502    \u2502                       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518 ...\u25b2....  \u2502\n\u2502    \u2502        \u2502            \u2502        \u2502      \u2502    \u2502                                   \u2502      \u2502\n\u2502    \u2502        \u2502            \u2502        \u2502      \u2502    \u2502                                   \u2502      \u2502\n\u2502 \u250c\u2500\u2500\u2534\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2534\u2500\u2500\u2500\u2510     \u250c\u2500\u2500\u2534\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2534\u2500\u2500\u2500\u2510  \u2502    \u2502                                \u250c\u2500\u2500\u2534\u2500\u2500\u2500\u2510  \u2502\n\u2502 \u2502 Tx0  \u2502 \u2502 Tx1  \u2502     \u2502 Tx2  \u2502 \u2502 Tx3  \u2502  \u2502    \u2502                                \u2502 Tx3  \u2502  \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518  \u2502    \u2502                                \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2518  \u2502\n\u2502                                          \u2502    \u2502                                          \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n     Transactions Hashed in a Merkle Tree              After Pruning Tx0-2 from the Block\n```\n\nA block header with no transactions would be about 80 bytes. If we suppose blocks are generated every 10 minutes, 80 bytes * 6 * 24 * 365 = 4.2MB per year. With computer systems typically selling with 2GB of RAM as of 2008, and Moore's Law predicting current growth of 1.2GB per year, storage should not be a problem even if the block headers must be kept in memory.",
 76 |     "source": "data/bitcoin-whitepaper.md",
 77 |     "section_number": 7,
 78 |     "date_indexed": "2025-03-20T15:28:42.816846",
 79 |     "tags": [],
 80 |     "category": []
 81 |   },
 82 |   {
 83 |     "id": "bitcoin-whitepaper.md_section_8",
 84 |     "title": "8. Simplified Payment Verification",
 85 |     "text": "It is possible to verify payments without running a full network node. A user only needs to keep a copy of the block headers of the longest proof-of-work chain, which he can get by querying network nodes until he's convinced he has the longest chain, and obtain the Merkle branch linking the transaction to the block it's timestamped in. He can't check the transaction for himself, but by linking it to a place in the chain, he can see that a network node has accepted it, and blocks added after it further confirm the network has accepted it.\n\n```\n     Longest Proof-of-Work Chain\n        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510       \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n        \u2502   Block Header                         \u2502      \u2502   Block Header                         \u2502       \u2502   Block Header                         \u2502\n        \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502      \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502       \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u25ba\u2502 Prev Hash        \u2502 \u2502 Nonce        \u2502 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u25ba\u2502 Prev Hash        \u2502 \u2502 Nonce        \u2502 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u25ba\u2502 Prev Hash        \u2502 \u2502 Nonce        \u2502 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25ba\n        \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502      \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502       \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502\n        \u2502                                        \u2502      \u2502                                        \u2502       \u2502                                        \u2502\n        \u2502     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510              \u2502      \u2502    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510              \u2502  
     \u2502     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510              \u2502\n        \u2502     \u2502   Merkle Root     \u2502              \u2502      \u2502    \u2502   Merkle Root      \u2502              \u2502       \u2502     \u2502   Merkle Root     \u2502              \u2502\n        \u2502     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518              \u2502      \u2502    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25b2\u2500\u25b2\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518              \u2502       \u2502     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518              \u2502\n        \u2502                                        \u2502      \u2502             \u2502 \u2502                        \u2502       \u2502                                        \u2502\n        \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                                                      \u2502 \u2502\n                                                                      
\u2502 \u2502\n                                                        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502 \u2502     ..........\n                                                        \u2502        \u251c\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500.        .\n                                                        \u2502 Hash01 \u2502            . Hash23 .\n                                                        \u2502        \u2502            .\u25b2.....\u25b2..\n                                                        \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518             \u2502     \u2502\n                                                                               \u2502     \u2502\n                                                                               \u2502     \u2502   Merkle Branch for Tx3\n                                                                               \u2502     \u2502\n                                                                         \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2510 ..\u2502.....\n                                                                         \u2502       \u2502 .      .\n                                                                         \u2502 Hash2 \u2502 .Hash3 .\n                                                                         \u2502       \u2502 .      
.\n                                                                         \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 ...\u25b2....\n                                                                                      \u2502\n                                                                                      \u2502\n                                                                                  \u250c\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2510\n                                                                                  \u2502  Tx3  \u2502\n                                                                                  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\n\nAs such, the verification is reliable as long as honest nodes control the network, but is more vulnerable if the network is overpowered by an attacker. While network nodes can verify transactions for themselves, the simplified method can be fooled by an attacker's fabricated transactions for as long as the attacker can continue to overpower the network. One strategy to protect against this would be to accept alerts from network nodes when they detect an invalid block, prompting the user's software to download the full block and alerted transactions to confirm the inconsistency. Businesses that receive frequent payments will probably still want to run their own nodes for more independent security and quicker verification.",
 86 |     "source": "data/bitcoin-whitepaper.md",
 87 |     "section_number": 8,
 88 |     "date_indexed": "2025-03-20T15:28:42.816847",
 89 |     "tags": [],
 90 |     "category": []
 91 |   },
 92 |   {
 93 |     "id": "bitcoin-whitepaper.md_section_9",
 94 |     "title": "9. Combining and Splitting Value",
 95 |     "text": "Although it would be possible to handle coins individually, it would be unwieldy to make a separate transaction for every cent in a transfer. To allow value to be split and combined, transactions contain multiple inputs and outputs. Normally there will be either a single input from a larger previous transaction or multiple inputs combining smaller amounts, and at most two outputs: one for the payment, and one returning the change, if any, back to the sender.\n\n```\n     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n     \u2502 Transaction          \u2502\n     \u2502                      \u2502\n     \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2510  \u250c\u2500\u2500\u2500\u2500\u2500\u2510   \u2502\n\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u25ba\u2502 in  \u2502  \u2502 out \u2502 \u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u25ba\n     \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2518  \u2514\u2500\u2500\u2500\u2500\u2500\u2518   \u2502\n     \u2502                      \u2502\n     \u2502                      \u2502\n     \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2510  \u250c\u2500\u2500\u2500\u2500\u2500\u2510   \u2502\n\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u25ba\u2502 in  \u2502  \u2502 ... \u2502 \u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u25ba\n     \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2518  \u2514\u2500\u2500\u2500\u2500\u2500\u2518   \u2502\n     \u2502                      \u2502\n     \u2502                      \u2502\n     \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2510            \u2502\n\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u25ba\u2502...  
\u2502            \u2502\n     \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2518            \u2502\n     \u2502                      \u2502\n     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\nIt should be noted that fan-out, where a transaction depends on several transactions, and those transactions depend on many more, is not a problem here. There is never the need to extract a complete standalone copy of a transaction's history.",
 96 |     "source": "data/bitcoin-whitepaper.md",
 97 |     "section_number": 9,
 98 |     "date_indexed": "2025-03-20T15:28:42.816849",
 99 |     "tags": [],
100 |     "category": []
101 |   },
102 |   {
103 |     "id": "bitcoin-whitepaper.md_section_10",
104 |     "title": "10. Privacy",
105 |     "text": "The traditional banking model achieves a level of privacy by limiting access to information to the parties involved and the trusted third party. The necessity to announce all transactions publicly precludes this method, but privacy can still be maintained by breaking the flow of information in another place: by keeping public keys anonymous. The public can see that someone is sending an amount to someone else, but without information linking the transaction to anyone. This is similar to the level of information released by stock exchanges, where the time and size of individual trades, the \"tape\", is made public, but without telling who the parties were.\n\n```\nTraditional Privacy Models                                                \u2502\n                                      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510  \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502  Trusted    \u2502   \u2502              \u2502  \u2502  \u2502        \u2502\n\u2502  Identities  \u251c\u2500\u2500\u2524 Transactions \u251c\u2500\u2500\u2500\u25ba\u2502 Third Party \u251c\u2500\u2500\u25ba\u2502 Counterparty \u2502  \u2502  \u2502 Public \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502             \u2502   \u2502              \u2502  \u2502  \u2502        \u2502\n                                      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518  \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                                                          \u2502\n\nNew Privacy Model\n                                       \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502        \u2502\n\u2502  Identities  \u2502 \u2502 \u2502 Transactions \u251c\u2500\u2500\u2500\u25ba\u2502 Public \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502        \u2502\n                                       \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n```\nAs an additional firewall, a new key pair should be used for each transaction to keep them from being linked to a common owner. Some linking is still unavoidable with multi-input transactions, which necessarily reveal that their inputs were owned by the same owner. The risk is that if the owner of a key is revealed, linking could reveal other transactions that belonged to the same owner.",
106 |     "source": "data/bitcoin-whitepaper.md",
107 |     "section_number": 10,
108 |     "date_indexed": "2025-03-20T15:28:42.816850",
109 |     "tags": [],
110 |     "category": []
111 |   },
112 |   {
113 |     "id": "bitcoin-whitepaper.md_section_11",
114 |     "title": "11. Calculations",
115 |     "text": "We consider the scenario of an attacker trying to generate an alternate chain faster than the honest chain. Even if this is accomplished, it does not throw the system open to arbitrary changes, such as creating value out of thin air or taking money that never belonged to the attacker. Nodes are not going to accept an invalid transaction as payment, and honest nodes will never accept a block containing them. An attacker can only try to change one of his own transactions to take back money he recently spent.\n\nThe race between the honest chain and an attacker chain can be characterized as a Binomial Random Walk. The success event is the honest chain being extended by one block, increasing its lead by +1, and the failure event is the attacker's chain being extended by one block, reducing the gap by -1.\n\nThe probability of an attacker catching up from a given deficit is analogous to a Gambler's Ruin problem. Suppose a gambler with unlimited credit starts at a deficit and plays potentially an infinite number of trials to try to reach breakeven. We can calculate the probability he ever reaches breakeven, or that an attacker ever catches up with the honest chain, as follows [^8]:\n\n```plaintext\np = probability an honest node finds the next block\nq = probability the attacker finds the next block\nqz = probability the attacker will ever catch up from z blocks behind\n```\n     \n$$\nqz = \n\\begin{cases} \n1 & \\text{if } p \\leq q \\\\\n\\left(\\frac{q}{p}\\right)^z & \\text{if } p > q \n\\end{cases}\n$$\n\nGiven our assumption that p > q, the probability drops exponentially as the number of blocks the attacker has to catch up with increases. With the odds against him, if he doesn't make a lucky lunge forward early on, his chances become vanishingly small as he falls further behind. \n\nWe now consider how long the recipient of a new transaction needs to wait before being sufficiently certain the sender can't change the transaction. 
We assume the sender is an attacker who wants to make the recipient believe he paid him for a while, then switch it to pay back to himself after some time has passed. The receiver will be alerted when that happens, but the sender hopes it will be too late.\n\nThe receiver generates a new key pair and gives the public key to the sender shortly before signing. This prevents the sender from preparing a chain of blocks ahead of time by working on it continuously until he is lucky enough to get far enough ahead, then executing the transaction at that moment. Once the transaction is sent, the dishonest sender starts working in secret on a parallel chain containing an alternate version of his transaction.\n\nThe recipient waits until the transaction has been added to a block and z blocks have been linked after it. He doesn't know the exact amount of progress the attacker has made, but assuming the honest blocks took the average expected time per block, the attacker's potential progress will be a Poisson distribution with expected value:\n\n$$\n\\lambda = z\\frac{q}{p}\n$$\n\nTo get the probability the attacker could still catch up now, we multiply the Poisson density for each amount of progress he could have made by the probability he could catch up from that point:\n\n$$\n\\sum_{k=0}^{\\infty} \\frac{\\lambda^k e^{-\\lambda}}{k!} \\cdot \\left\\{ \n\\begin{array}{cl} \n\\left(\\frac{q}{p}\\right)^{(z-k)} & \\text{if } k \\leq z \\\\\n1 & \\text{if } k > z \n\\end{array}\n\\right.\n$$\n\nRearranging to avoid summing the infinite tail of the distribution...\n\n$$\n1 - \\sum_{k=0}^{z} \\frac{\\lambda^k e^{-\\lambda}}{k!} \\left(1-\\left(\\frac{q}{p}\\right)^{(z-k)}\\right)\n$$\n\nConverting to C code...\n\n```c\n#include <math.h>\n\ndouble AttackerSuccessProbability(double q, int z)\n{\n    double p = 1.0 - q;\n    double lambda = z * (q / p);\n    double sum = 1.0;\n    int i, k;\n    for (k = 0; k <= z; k++)\n    {\n        double poisson = exp(-lambda);\n        for (i = 
1; i <= k; i++)\n            poisson *= lambda / i;\n        sum -= poisson * (1 - pow(q / p, z - k));\n    }\n    return sum;\n}\n```\nRunning some results, we can see the probability drop off exponentially with z.\n\n```plaintext\nq=0.1\nz=0 P=1.0000000\nz=1 P=0.2045873\nz=2 P=0.0509779\nz=3 P=0.0131722\nz=4 P=0.0034552\nz=5 P=0.0009137\nz=6 P=0.0002428\nz=7 P=0.0000647\nz=8 P=0.0000173\nz=9 P=0.0000046\nz=10 P=0.0000012\n\nq=0.3\nz=0 P=1.0000000\nz=5 P=0.1773523\nz=10 P=0.0416605\nz=15 P=0.0101008\nz=20 P=0.0024804\nz=25 P=0.0006132\nz=30 P=0.0001522\nz=35 P=0.0000379\nz=40 P=0.0000095\nz=45 P=0.0000024\nz=50 P=0.0000006\n```\nSolving for P less than 0.1%...\n```plaintext\nP < 0.001\nq=0.10 z=5\nq=0.15 z=8\nq=0.20 z=11\nq=0.25 z=15\nq=0.30 z=24\nq=0.35 z=41\nq=0.40 z=89\nq=0.45 z=340\n```",
116 |     "source": "data/bitcoin-whitepaper.md",
117 |     "section_number": 11,
118 |     "date_indexed": "2025-03-20T15:28:42.816851",
119 |     "tags": [],
120 |     "category": []
121 |   },
122 |   {
123 |     "id": "bitcoin-whitepaper.md_section_12",
124 |     "title": "12. Conclusion",
125 |     "text": "We have proposed a system for electronic transactions without relying on trust. We started with the usual framework of coins made from digital signatures, which provides strong control of ownership, but is incomplete without a way to prevent double-spending. To solve this, we proposed a peer-to-peer network using proof-of-work to record a public history of transactions that quickly becomes computationally impractical for an attacker to change if honest nodes control a majority of CPU power. The network is robust in its unstructured simplicity. Nodes work all at once with little coordination. They do not need to be identified, since messages are not routed to any particular place and only need to be delivered on a best effort basis. Nodes can leave and rejoin the network at will, accepting the proof-of-work chain as proof of what happened while they were gone. They vote with their CPU power, expressing their acceptance of valid blocks by working on extending them and rejecting invalid blocks by refusing to work on them. Any needed rules and incentives can be enforced with this consensus mechanism.\n<br>",
126 |     "source": "data/bitcoin-whitepaper.md",
127 |     "section_number": 12,
128 |     "date_indexed": "2025-03-20T15:28:42.816852",
129 |     "tags": [],
130 |     "category": []
131 |   },
132 |   {
133 |     "id": "bitcoin-whitepaper.md_section_13",
134 |     "title": "References",
135 |     "text": "---\n[^1]: W. Dai, \"b-money,\" http://www.weidai.com/bmoney.txt, 1998.\n[^2]: H. Massias, X.S. Avila, and J.-J. Quisquater, \"Design of a secure timestamping service with minimal\ntrust requirements,\" In 20th Symposium on Information Theory in the Benelux, May 1999.\n[^3]: S. Haber, W.S. Stornetta, \"How to time-stamp a digital document,\" In Journal of Cryptology, vol 3, no\n2, pages 99-111, 1991.\n[^4]: D. Bayer, S. Haber, W.S. Stornetta, \"Improving the efficiency and reliability of digital time-stamping,\"\nIn Sequences II: Methods in Communication, Security and Computer Science, pages 329-334, 1993.\n[^5]: S. Haber, W.S. Stornetta, \"Secure names for bit-strings,\" In Proceedings of the 4th ACM Conference\non Computer and Communications Security, pages 28-35, April 1997.\n[^6]: A. Back, \"Hashcash - a denial of service counter-measure,\"\nhttp://www.hashcash.org/papers/hashcash.pdf, 2002.\n[^7]: R.C. Merkle, \"Protocols for public key cryptosystems,\" In Proc. 1980 Symposium on Security and\nPrivacy, IEEE Computer Society, pages 122-133, April 1980.\n[^8]: W. Feller, \"An introduction to probability theory and its applications,\" 1957.",
136 |     "source": "data/bitcoin-whitepaper.md",
137 |     "section_number": 13,
138 |     "date_indexed": "2025-03-20T15:28:42.816854",
139 |     "tags": [],
140 |     "category": []
141 |   }
142 | ]
```
Page 4/4FirstPrevNextLast