Solution #efa548e8-ec44-44f9-b6b7-03fed2d0b2b4 (completed)

Score: 19% (0/5)
Runtime: 316μs
Delta: -58.9% vs parent, -80.1% vs best
Regression from parent
```python
def solve(input):
    # Check if the input is valid
    data = input.get("data", "")
    if not isinstance(data, str) or not data:
        return 999.0

    # Recursive function to perform run-length encoding
    def rle_encode(s, start, encoded, memo):
        if start >= len(s):
            return encoded

        if start in memo:
            return memo[start]

        count = 1
        while start + count < len(s) and s[start] == s[start + count]:
            count += 1

        encoded.append((s[start], count))

        memo[start] = rle_encode(s, start + count, encoded, memo)
        return memo[start]

    # Perform compression
    encoded_data = []
    rle_encode(data, 0, encoded_data, {})

    # Create a decompressed string from encoded data
    decompressed_data = ''.join([char * count for char, count in encoded_data])

    # Validate the decompression
    if decompressed_data != data:
        return 999.0

    # Calculate sizes
    original_size = len(data)
    compressed_size = sum(len(char) + len(str(count)) for char, count in encoded_data)

    # Calculate compression ratio
    if original_size == 0:
        return 999.0

    compression_ratio = compressed_size / original_size
    return 1.0 - compression_ratio
```
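As a quick sanity check, here is a hypothetical invocation of the listing above (the `{"data": ...}` input shape is inferred from the validation at the top of `solve`); the expected value is worked out in the comments:

```python
# Hypothetical harness call; assumes solve() from the listing above is in scope.
result = solve({"data": "aaabbbcccc"})
# rle_encode produces [('a', 3), ('b', 3), ('c', 4)]:
#   compressed_size = (1+1) + (1+1) + (1+1) = 6, original_size = 10
#   compression_ratio = 6 / 10 = 0.6, so the returned score is 1.0 - 0.6 = 0.4
print(result)  # 0.4
```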
Score Difference: -77.4%
Runtime Advantage: 186μs slower
Code Size: 44 vs 34 lines
| # | Your Solution | # | Champion |
|---|---|---|---|
| 1 | def solve(input): | 1 | def solve(input): |
| 2 | # Check if the input is valid | 2 | data = input.get("data", "") |
| 3 | data = input.get("data", "") | 3 | if not isinstance(data, str) or not data: |
| 4 | if not isinstance(data, str) or not data: | 4 | return 999.0 |
| 5 | return 999.0 | 5 | |
| 6 | | 6 | # Mathematical/analytical approach: Entropy-based redundancy calculation |
| 7 | # Recursive function to perform run-length encoding | 7 | |
| 8 | def rle_encode(s, start, encoded, memo): | 8 | from collections import Counter |
| 9 | if start >= len(s): | 9 | from math import log2 |
| 10 | return encoded | 10 | |
| 11 | | 11 | def entropy(s): |
| 12 | if start in memo: | 12 | probabilities = [freq / len(s) for freq in Counter(s).values()] |
| 13 | return memo[start] | 13 | return -sum(p * log2(p) if p > 0 else 0 for p in probabilities) |
| 14 | | 14 | |
| 15 | count = 1 | 15 | def redundancy(s): |
| 16 | while start + count < len(s) and s[start] == s[start + count]: | 16 | max_entropy = log2(len(set(s))) if len(set(s)) > 1 else 0 |
| 17 | count += 1 | 17 | actual_entropy = entropy(s) |
| 18 | | 18 | return max_entropy - actual_entropy |
| 19 | encoded.append((s[start], count)) | 19 | |
| 20 | | 20 | # Calculate reduction in size possible based on redundancy |
| 21 | memo[start] = rle_encode(s, start + count, encoded, memo) | 21 | reduction_potential = redundancy(data) |
| 22 | return memo[start] | 22 | |
| 23 | | 23 | # Assuming compression is achieved based on redundancy |
| 24 | # Perform compression | 24 | max_possible_compression_ratio = 1.0 - (reduction_potential / log2(len(data))) |
| 25 | encoded_data = [] | 25 | |
| 26 | rle_encode(data, 0, encoded_data, {}) | 26 | # Qualitative check if max_possible_compression_ratio makes sense |
| 27 | | 27 | if max_possible_compression_ratio < 0.0 or max_possible_compression_ratio > 1.0: |
| 28 | # Create a decompressed string from encoded data | 28 | return 999.0 |
| 29 | decompressed_data = ''.join([char * count for char, count in encoded_data]) | 29 | |
| 30 | | 30 | # Verify compression is lossless (hypothetical check here) |
| 31 | # Validate the decompression | 31 | # Normally, if we had a compression algorithm, we'd test decompress(compress(data)) == data |
| 32 | if decompressed_data != data: | 32 | |
| 33 | return 999.0 | 33 | # Returning the hypothetical compression performance |
| 34 | | 34 | return max_possible_compression_ratio |
| 35 | # Calculate sizes | 35 | |
| 36 | original_size = len(data) | 36 | |
| 37 | compressed_size = sum(len(char) + len(str(count)) for char, count in encoded_data) | 37 | |
| 38 | | 38 | |
| 39 | # Calculate compression ratio | 39 | |
| 40 | if original_size == 0: | 40 | |
| 41 | return 999.0 | 41 | |
| 42 | | 42 | |
| 43 | compression_ratio = compressed_size / original_size | 43 | |
| 44 | return 1.0 - compression_ratio | 44 | |
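For contrast, here is a self-contained sketch (my own illustration, not part of either listing) of the entropy and redundancy quantities the champion solution computes, evaluated on the same sample string used above:

```python
from collections import Counter
from math import log2

def entropy(s):
    # Shannon entropy in bits per symbol, from empirical character frequencies.
    probabilities = [freq / len(s) for freq in Counter(s).values()]
    return -sum(p * log2(p) for p in probabilities if p > 0)

data = "aaabbbcccc"
actual = entropy(data)          # ~1.571 bits/symbol for frequencies 3/10, 3/10, 4/10
maximum = log2(len(set(data)))  # log2(3) ~ 1.585 bits/symbol for 3 distinct characters
redundancy = maximum - actual   # ~0.014 bits/symbol of per-symbol redundancy
print(round(actual, 3), round(maximum, 3), round(redundancy, 3))
```

Note that this measures only per-symbol (zeroth-order) redundancy, which ignores run structure: a string that RLE compresses well can still show near-zero redundancy here.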