diff --git a/armsrc/hitag2.c b/armsrc/hitag2.c
index b58ca1b81..03aedc7a1 100644
--- a/armsrc/hitag2.c
+++ b/armsrc/hitag2.c
@@ -1398,14 +1398,14 @@ void ReaderHitag(hitag_function htf, hitag_data *htd) {
 
     // Tag specific configuration settings (sof, timings, etc.)
     // TODO HTS
-/*    if (htf <= HTS_LAST_CMD) {
-        // hitagS settings
-        t_wait_1 = 204;
-        t_wait_2 = 128;
-        flipped_bit = 0;
-        tag_size = 8;
-        DBG DbpString("Configured for hitagS reader");
-    } else */
+    /* if (htf <= HTS_LAST_CMD) {
+        // hitagS settings
+        t_wait_1 = 204;
+        t_wait_2 = 128;
+        flipped_bit = 0;
+        tag_size = 8;
+        DBG DbpString("Configured for hitagS reader");
+    } else */
     if (htf <= HT1_LAST_CMD) {
         // hitag1 settings
         t_wait_1 = 204;
@@ -1724,24 +1724,24 @@ void WriterHitag(hitag_function htf, hitag_data *htd, int page) {
 
     // Tag specific configuration settings (sof, timings, etc.)
     // TODO HTS
-/*    if (htf <= HTS_LAST_CMD) {
-        // hitagS settings
-        t_wait_1 = 204;
-        t_wait_2 = 128;
-        //tag_size = 256;
-        flipped_bit = 0;
-        tag_size = 8;
-        DbpString("Configured for hitagS writer");
-    } else */
+    /* if (htf <= HTS_LAST_CMD) {
+        // hitagS settings
+        t_wait_1 = 204;
+        t_wait_2 = 128;
+        //tag_size = 256;
+        flipped_bit = 0;
+        tag_size = 8;
+        DbpString("Configured for hitagS writer");
+    } else */
     // TODO HT1
-/*    if (htf <= HT1_LAST_CMD) {
-        // hitag1 settings
-        t_wait_1 = 204;
-        t_wait_2 = 128;
-        tag_size = 256;
-        flipped_bit = 0;
-        DbpString("Configured for hitag1 writer");
-    } else */
+    /* if (htf <= HT1_LAST_CMD) {
+        // hitag1 settings
+        t_wait_1 = 204;
+        t_wait_2 = 128;
+        tag_size = 256;
+        flipped_bit = 0;
+        DbpString("Configured for hitag1 writer");
+    } else */
     // if (htf <= HT2_LAST_CMD) {
     // hitag2 settings
     t_wait_1 = HITAG_T_WAIT_1_MIN;
diff --git a/armsrc/lfops.c b/armsrc/lfops.c
index 985ec3bd9..7eececdc9 100644
--- a/armsrc/lfops.c
+++ b/armsrc/lfops.c
@@ -2041,15 +2041,15 @@ void T55xx_ChkPwds(uint8_t flags) {
         if (isok != sizeof(counter)) goto OUT;
 
-        pwd_count = (uint16_t)(counter[1] << 8 | counter[0]);
+        pwd_count = (uint16_t)(counter[1] << 8 | counter[0]);
         if (pwd_count == 0) goto OUT;
-
+
         // since flash can report way too many pwds, we need to limit it.
         // bigbuff EM size is determined by CARD_MEMORY_SIZE
         // a password is 4bytes.
         uint16_t pwd_size_available = MIN(CARD_MEMORY_SIZE, pwd_count * 4);
-
+
         // adjust available pwd_count
         pwd_count = pwd_size_available / 4;
diff --git a/armsrc/mifarecmd.c b/armsrc/mifarecmd.c
index fcf69f666..79efc8b9f 100644
--- a/armsrc/mifarecmd.c
+++ b/armsrc/mifarecmd.c
@@ -1389,11 +1389,11 @@ void MifareChkKeys_fast(uint32_t arg0, uint32_t arg1, uint32_t arg2, uint8_t *da
         if (keyCount == 0) goto OUT;
-
+
         // limit size of availlable for keys in bigbuff
         // a key is 6bytes
         uint16_t key_mem_available = MIN(BIGBUF_SIZE, keyCount * 6);
-
+
         keyCount = key_mem_available / 6;
         datain = BigBuf_malloc(key_mem_available);
@@ -1715,9 +1715,9 @@ void MifareChkKeys(uint8_t *datain) {
     bool clearTrace = datain[2];
     uint16_t key_count = (datain[3] << 8) | datain[4];
-    uint16_t key_mem_available = MIN( (PM3_CMD_DATA_SIZE - 5) , key_count * 6);
+    uint16_t key_mem_available = MIN((PM3_CMD_DATA_SIZE - 5), key_count * 6);
     key_count = key_mem_available / 6;
-
+
     datain += 5;
     LEDsoff();
diff --git a/client/deps/hardnested/hardnested_bf_core.c b/client/deps/hardnested/hardnested_bf_core.c
index a5196be37..b8fd56cf2 100644
--- a/client/deps/hardnested/hardnested_bf_core.c
+++ b/client/deps/hardnested/hardnested_bf_core.c
@@ -564,13 +564,13 @@ static SIMDExecInstr GetSIMDInstr(void) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) instr = SIMD_AVX2;
-    else if (__builtin_cpu_supports("avx")) instr = SIMD_AVX;
-    else if (__builtin_cpu_supports("sse2")) instr = SIMD_SSE2;
-    else if (__builtin_cpu_supports("mmx")) instr = SIMD_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) instr = SIMD_AVX2;
+        else if (__builtin_cpu_supports("avx")) instr = SIMD_AVX;
+        else if (__builtin_cpu_supports("sse2")) instr = SIMD_SSE2;
+        else if (__builtin_cpu_supports("mmx")) instr = SIMD_MMX;
+        else
 #endif
-    instr = SIMD_NONE;
+            instr = SIMD_NONE;
 
     return instr;
 }
diff --git a/client/deps/hardnested/hardnested_bitarray_core.c b/client/deps/hardnested/hardnested_bitarray_core.c
index 19d1274f9..d62da774c 100644
--- a/client/deps/hardnested/hardnested_bitarray_core.c
+++ b/client/deps/hardnested/hardnested_bitarray_core.c
@@ -311,13 +311,13 @@ uint32_t *malloc_bitarray_dispatch(uint32_t x) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) malloc_bitarray_function_p = &malloc_bitarray_AVX2;
-    else if (__builtin_cpu_supports("avx")) malloc_bitarray_function_p = &malloc_bitarray_AVX;
-    else if (__builtin_cpu_supports("sse2")) malloc_bitarray_function_p = &malloc_bitarray_SSE2;
-    else if (__builtin_cpu_supports("mmx")) malloc_bitarray_function_p = &malloc_bitarray_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) malloc_bitarray_function_p = &malloc_bitarray_AVX2;
+        else if (__builtin_cpu_supports("avx")) malloc_bitarray_function_p = &malloc_bitarray_AVX;
+        else if (__builtin_cpu_supports("sse2")) malloc_bitarray_function_p = &malloc_bitarray_SSE2;
+        else if (__builtin_cpu_supports("mmx")) malloc_bitarray_function_p = &malloc_bitarray_MMX;
+        else
 #endif
-    malloc_bitarray_function_p = &malloc_bitarray_NOSIMD;
+            malloc_bitarray_function_p = &malloc_bitarray_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*malloc_bitarray_function_p)(x);
@@ -329,13 +329,13 @@ void free_bitarray_dispatch(uint32_t *x) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) free_bitarray_function_p = &free_bitarray_AVX2;
-    else if (__builtin_cpu_supports("avx")) free_bitarray_function_p = &free_bitarray_AVX;
-    else if (__builtin_cpu_supports("sse2")) free_bitarray_function_p = &free_bitarray_SSE2;
-    else if (__builtin_cpu_supports("mmx")) free_bitarray_function_p = &free_bitarray_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) free_bitarray_function_p = &free_bitarray_AVX2;
+        else if (__builtin_cpu_supports("avx")) free_bitarray_function_p = &free_bitarray_AVX;
+        else if (__builtin_cpu_supports("sse2")) free_bitarray_function_p = &free_bitarray_SSE2;
+        else if (__builtin_cpu_supports("mmx")) free_bitarray_function_p = &free_bitarray_MMX;
+        else
 #endif
-    free_bitarray_function_p = &free_bitarray_NOSIMD;
+            free_bitarray_function_p = &free_bitarray_NOSIMD;
 
     // call the most optimized function for this CPU
     (*free_bitarray_function_p)(x);
@@ -347,13 +347,13 @@ uint32_t bitcount_dispatch(uint32_t a) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) bitcount_function_p = &bitcount_AVX2;
-    else if (__builtin_cpu_supports("avx")) bitcount_function_p = &bitcount_AVX;
-    else if (__builtin_cpu_supports("sse2")) bitcount_function_p = &bitcount_SSE2;
-    else if (__builtin_cpu_supports("mmx")) bitcount_function_p = &bitcount_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) bitcount_function_p = &bitcount_AVX2;
+        else if (__builtin_cpu_supports("avx")) bitcount_function_p = &bitcount_AVX;
+        else if (__builtin_cpu_supports("sse2")) bitcount_function_p = &bitcount_SSE2;
+        else if (__builtin_cpu_supports("mmx")) bitcount_function_p = &bitcount_MMX;
+        else
 #endif
-    bitcount_function_p = &bitcount_NOSIMD;
+            bitcount_function_p = &bitcount_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*bitcount_function_p)(a);
@@ -365,13 +365,13 @@ uint32_t count_states_dispatch(uint32_t *bitarray) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) count_states_function_p = &count_states_AVX2;
-    else if (__builtin_cpu_supports("avx")) count_states_function_p = &count_states_AVX;
-    else if (__builtin_cpu_supports("sse2")) count_states_function_p = &count_states_SSE2;
-    else if (__builtin_cpu_supports("mmx")) count_states_function_p = &count_states_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) count_states_function_p = &count_states_AVX2;
+        else if (__builtin_cpu_supports("avx")) count_states_function_p = &count_states_AVX;
+        else if (__builtin_cpu_supports("sse2")) count_states_function_p = &count_states_SSE2;
+        else if (__builtin_cpu_supports("mmx")) count_states_function_p = &count_states_MMX;
+        else
 #endif
-    count_states_function_p = &count_states_NOSIMD;
+            count_states_function_p = &count_states_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*count_states_function_p)(bitarray);
@@ -383,13 +383,13 @@ void bitarray_AND_dispatch(uint32_t *A, uint32_t *B) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) bitarray_AND_function_p = &bitarray_AND_AVX2;
-    else if (__builtin_cpu_supports("avx")) bitarray_AND_function_p = &bitarray_AND_AVX;
-    else if (__builtin_cpu_supports("sse2")) bitarray_AND_function_p = &bitarray_AND_SSE2;
-    else if (__builtin_cpu_supports("mmx")) bitarray_AND_function_p = &bitarray_AND_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) bitarray_AND_function_p = &bitarray_AND_AVX2;
+        else if (__builtin_cpu_supports("avx")) bitarray_AND_function_p = &bitarray_AND_AVX;
+        else if (__builtin_cpu_supports("sse2")) bitarray_AND_function_p = &bitarray_AND_SSE2;
+        else if (__builtin_cpu_supports("mmx")) bitarray_AND_function_p = &bitarray_AND_MMX;
+        else
 #endif
-    bitarray_AND_function_p = &bitarray_AND_NOSIMD;
+            bitarray_AND_function_p = &bitarray_AND_NOSIMD;
 
     // call the most optimized function for this CPU
     (*bitarray_AND_function_p)(A, B);
@@ -401,13 +401,13 @@ void bitarray_low20_AND_dispatch(uint32_t *A, uint32_t *B) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) bitarray_low20_AND_function_p = &bitarray_low20_AND_AVX2;
-    else if (__builtin_cpu_supports("avx")) bitarray_low20_AND_function_p = &bitarray_low20_AND_AVX;
-    else if (__builtin_cpu_supports("sse2")) bitarray_low20_AND_function_p = &bitarray_low20_AND_SSE2;
-    else if (__builtin_cpu_supports("mmx")) bitarray_low20_AND_function_p = &bitarray_low20_AND_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) bitarray_low20_AND_function_p = &bitarray_low20_AND_AVX2;
+        else if (__builtin_cpu_supports("avx")) bitarray_low20_AND_function_p = &bitarray_low20_AND_AVX;
+        else if (__builtin_cpu_supports("sse2")) bitarray_low20_AND_function_p = &bitarray_low20_AND_SSE2;
+        else if (__builtin_cpu_supports("mmx")) bitarray_low20_AND_function_p = &bitarray_low20_AND_MMX;
+        else
 #endif
-    bitarray_low20_AND_function_p = &bitarray_low20_AND_NOSIMD;
+            bitarray_low20_AND_function_p = &bitarray_low20_AND_NOSIMD;
 
     // call the most optimized function for this CPU
     (*bitarray_low20_AND_function_p)(A, B);
@@ -419,13 +419,13 @@ uint32_t count_bitarray_AND_dispatch(uint32_t *A, uint32_t *B) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) count_bitarray_AND_function_p = &count_bitarray_AND_AVX2;
-    else if (__builtin_cpu_supports("avx")) count_bitarray_AND_function_p = &count_bitarray_AND_AVX;
-    else if (__builtin_cpu_supports("sse2")) count_bitarray_AND_function_p = &count_bitarray_AND_SSE2;
-    else if (__builtin_cpu_supports("mmx")) count_bitarray_AND_function_p = &count_bitarray_AND_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) count_bitarray_AND_function_p = &count_bitarray_AND_AVX2;
+        else if (__builtin_cpu_supports("avx")) count_bitarray_AND_function_p = &count_bitarray_AND_AVX;
+        else if (__builtin_cpu_supports("sse2")) count_bitarray_AND_function_p = &count_bitarray_AND_SSE2;
+        else if (__builtin_cpu_supports("mmx")) count_bitarray_AND_function_p = &count_bitarray_AND_MMX;
+        else
 #endif
-    count_bitarray_AND_function_p = &count_bitarray_AND_NOSIMD;
+            count_bitarray_AND_function_p = &count_bitarray_AND_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*count_bitarray_AND_function_p)(A, B);
@@ -437,13 +437,13 @@ uint32_t count_bitarray_low20_AND_dispatch(uint32_t *A, uint32_t *B) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_AVX2;
-    else if (__builtin_cpu_supports("avx")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_AVX;
-    else if (__builtin_cpu_supports("sse2")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_SSE2;
-    else if (__builtin_cpu_supports("mmx")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_AVX2;
+        else if (__builtin_cpu_supports("avx")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_AVX;
+        else if (__builtin_cpu_supports("sse2")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_SSE2;
+        else if (__builtin_cpu_supports("mmx")) count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_MMX;
+        else
 #endif
-    count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_NOSIMD;
+            count_bitarray_low20_AND_function_p = &count_bitarray_low20_AND_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*count_bitarray_low20_AND_function_p)(A, B);
@@ -455,13 +455,13 @@ void bitarray_AND4_dispatch(uint32_t *A, uint32_t *B, uint32_t *C, uint32_t *D)
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) bitarray_AND4_function_p = &bitarray_AND4_AVX2;
-    else if (__builtin_cpu_supports("avx")) bitarray_AND4_function_p = &bitarray_AND4_AVX;
-    else if (__builtin_cpu_supports("sse2")) bitarray_AND4_function_p = &bitarray_AND4_SSE2;
-    else if (__builtin_cpu_supports("mmx")) bitarray_AND4_function_p = &bitarray_AND4_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) bitarray_AND4_function_p = &bitarray_AND4_AVX2;
+        else if (__builtin_cpu_supports("avx")) bitarray_AND4_function_p = &bitarray_AND4_AVX;
+        else if (__builtin_cpu_supports("sse2")) bitarray_AND4_function_p = &bitarray_AND4_SSE2;
+        else if (__builtin_cpu_supports("mmx")) bitarray_AND4_function_p = &bitarray_AND4_MMX;
+        else
 #endif
-    bitarray_AND4_function_p = &bitarray_AND4_NOSIMD;
+            bitarray_AND4_function_p = &bitarray_AND4_NOSIMD;
 
     // call the most optimized function for this CPU
     (*bitarray_AND4_function_p)(A, B, C, D);
@@ -473,13 +473,13 @@ void bitarray_OR_dispatch(uint32_t *A, uint32_t *B) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) bitarray_OR_function_p = &bitarray_OR_AVX2;
-    else if (__builtin_cpu_supports("avx")) bitarray_OR_function_p = &bitarray_OR_AVX;
-    else if (__builtin_cpu_supports("sse2")) bitarray_OR_function_p = &bitarray_OR_SSE2;
-    else if (__builtin_cpu_supports("mmx")) bitarray_OR_function_p = &bitarray_OR_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) bitarray_OR_function_p = &bitarray_OR_AVX2;
+        else if (__builtin_cpu_supports("avx")) bitarray_OR_function_p = &bitarray_OR_AVX;
+        else if (__builtin_cpu_supports("sse2")) bitarray_OR_function_p = &bitarray_OR_SSE2;
+        else if (__builtin_cpu_supports("mmx")) bitarray_OR_function_p = &bitarray_OR_MMX;
+        else
 #endif
-    bitarray_OR_function_p = &bitarray_OR_NOSIMD;
+            bitarray_OR_function_p = &bitarray_OR_NOSIMD;
 
     // call the most optimized function for this CPU
     (*bitarray_OR_function_p)(A, B);
@@ -491,13 +491,13 @@ uint32_t count_bitarray_AND2_dispatch(uint32_t *A, uint32_t *B) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) count_bitarray_AND2_function_p = &count_bitarray_AND2_AVX2;
-    else if (__builtin_cpu_supports("avx")) count_bitarray_AND2_function_p = &count_bitarray_AND2_AVX;
-    else if (__builtin_cpu_supports("sse2")) count_bitarray_AND2_function_p = &count_bitarray_AND2_SSE2;
-    else if (__builtin_cpu_supports("mmx")) count_bitarray_AND2_function_p = &count_bitarray_AND2_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) count_bitarray_AND2_function_p = &count_bitarray_AND2_AVX2;
+        else if (__builtin_cpu_supports("avx")) count_bitarray_AND2_function_p = &count_bitarray_AND2_AVX;
+        else if (__builtin_cpu_supports("sse2")) count_bitarray_AND2_function_p = &count_bitarray_AND2_SSE2;
+        else if (__builtin_cpu_supports("mmx")) count_bitarray_AND2_function_p = &count_bitarray_AND2_MMX;
+        else
 #endif
-    count_bitarray_AND2_function_p = &count_bitarray_AND2_NOSIMD;
+            count_bitarray_AND2_function_p = &count_bitarray_AND2_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*count_bitarray_AND2_function_p)(A, B);
@@ -509,13 +509,13 @@ uint32_t count_bitarray_AND3_dispatch(uint32_t *A, uint32_t *B, uint32_t *C) {
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) count_bitarray_AND3_function_p = &count_bitarray_AND3_AVX2;
-    else if (__builtin_cpu_supports("avx")) count_bitarray_AND3_function_p = &count_bitarray_AND3_AVX;
-    else if (__builtin_cpu_supports("sse2")) count_bitarray_AND3_function_p = &count_bitarray_AND3_SSE2;
-    else if (__builtin_cpu_supports("mmx")) count_bitarray_AND3_function_p = &count_bitarray_AND3_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) count_bitarray_AND3_function_p = &count_bitarray_AND3_AVX2;
+        else if (__builtin_cpu_supports("avx")) count_bitarray_AND3_function_p = &count_bitarray_AND3_AVX;
+        else if (__builtin_cpu_supports("sse2")) count_bitarray_AND3_function_p = &count_bitarray_AND3_SSE2;
+        else if (__builtin_cpu_supports("mmx")) count_bitarray_AND3_function_p = &count_bitarray_AND3_MMX;
+        else
 #endif
-    count_bitarray_AND3_function_p = &count_bitarray_AND3_NOSIMD;
+            count_bitarray_AND3_function_p = &count_bitarray_AND3_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*count_bitarray_AND3_function_p)(A, B, C);
@@ -527,13 +527,13 @@ uint32_t count_bitarray_AND4_dispatch(uint32_t *A, uint32_t *B, uint32_t *C, uin
     else
 #endif
 #if defined(COMPILER_HAS_SIMD)
-    if (__builtin_cpu_supports("avx2")) count_bitarray_AND4_function_p = &count_bitarray_AND4_AVX2;
-    else if (__builtin_cpu_supports("avx")) count_bitarray_AND4_function_p = &count_bitarray_AND4_AVX;
-    else if (__builtin_cpu_supports("sse2")) count_bitarray_AND4_function_p = &count_bitarray_AND4_SSE2;
-    else if (__builtin_cpu_supports("mmx")) count_bitarray_AND4_function_p = &count_bitarray_AND4_MMX;
-    else
+        if (__builtin_cpu_supports("avx2")) count_bitarray_AND4_function_p = &count_bitarray_AND4_AVX2;
+        else if (__builtin_cpu_supports("avx")) count_bitarray_AND4_function_p = &count_bitarray_AND4_AVX;
+        else if (__builtin_cpu_supports("sse2")) count_bitarray_AND4_function_p = &count_bitarray_AND4_SSE2;
+        else if (__builtin_cpu_supports("mmx")) count_bitarray_AND4_function_p = &count_bitarray_AND4_MMX;
+        else
 #endif
-    count_bitarray_AND4_function_p = &count_bitarray_AND4_NOSIMD;
+            count_bitarray_AND4_function_p = &count_bitarray_AND4_NOSIMD;
 
     // call the most optimized function for this CPU
     return (*count_bitarray_AND4_function_p)(A, B, C, D);
diff --git a/client/src/cmdlfnexwatch.c b/client/src/cmdlfnexwatch.c
index 9f825a438..f8b75594c 100644
--- a/client/src/cmdlfnexwatch.c
+++ b/client/src/cmdlfnexwatch.c
@@ -43,7 +43,7 @@ static int usage_lf_nexwatch_clone(void) {
     PrintAndLogEx(NORMAL, " c : card id (decimal)");
     PrintAndLogEx(NORMAL, " m : mode (decimal) (0-15, defaults to 1)");
     PrintAndLogEx(NORMAL, " n : Nexkey credential");
-    PrintAndLogEx(NORMAL, " q : Quadrakey credential");
+    PrintAndLogEx(NORMAL, " q : Quadrakey credential");
     PrintAndLogEx(NORMAL, "");
     PrintAndLogEx(NORMAL, "Examples:");
     PrintAndLogEx(NORMAL, " lf nexwatch clone r 5600000000213C9F8F150C");
@@ -55,7 +55,7 @@ static int usage_lf_nexwatch_sim(void) {
     PrintAndLogEx(NORMAL, "Enables simulation of Nexwatch card");
     PrintAndLogEx(NORMAL, "You can use raw hex values or create a credential based on id, mode");
-    PrintAndLogEx(NORMAL, "and type of credential (Nexkey/Quadrakey)");
+    PrintAndLogEx(NORMAL, "and type of credential (Nexkey/Quadrakey)");
     PrintAndLogEx(NORMAL, "Simulation runs until the button is pressed or another USB command is issued.");
     PrintAndLogEx(NORMAL, "");
     PrintAndLogEx(NORMAL, "Usage: lf nexwatch sim [h] [c ] [m ] [n|q]");
@@ -65,7 +65,7 @@ static int usage_lf_nexwatch_sim(void) {
     PrintAndLogEx(NORMAL, " c : card id (decimal)");
     PrintAndLogEx(NORMAL, " m : mode (decimal) (0-15, defaults to 1)");
     PrintAndLogEx(NORMAL, " n : Nexkey credential");
-    PrintAndLogEx(NORMAL, " q : Quadrakey credential");
+    PrintAndLogEx(NORMAL, " q : Quadrakey credential");
     PrintAndLogEx(NORMAL, "");
     PrintAndLogEx(NORMAL, "Examples:");
     PrintAndLogEx(NORMAL, " lf nexwatch sim r 5600000000213C9F8F150C");
@@ -76,10 +76,10 @@ static int usage_lf_nexwatch_sim(void) {
 
 // scramble parity (1234) -> (4231)
 static uint8_t nexwatch_parity_swap(uint8_t parity) {
-    uint8_t a = (((parity >> 3 ) & 1) );
-    a |= (((parity >> 1 ) & 1) << 1);
-    a |= (((parity >> 2 ) & 1) << 2);
-    a |= ((parity & 1) << 3);
+    uint8_t a = (((parity >> 3) & 1));
+    a |= (((parity >> 1) & 1) << 1);
+    a |= (((parity >> 2) & 1) << 2);
+    a |= ((parity & 1) << 3);
     return a;
 }
 // parity check
@@ -119,7 +119,7 @@ static int nexwatch_scamble(NexWatchScramble_t action, uint32_t *id, uint32_t *s
         28, 24, 20, 16, 12, 8, 4, 0
     };
 
-    switch(action) {
+    switch (action) {
         case DESCRAMBLE: {
             *id = 0;
             for (uint8_t idx = 0; idx < 32; idx++) {
@@ -144,7 +144,8 @@
             }
             break;
         }
-        default: break;
+        default:
+            break;
     }
     return PM3_SUCCESS;
 }
@@ -227,7 +228,7 @@ int demodNexWatch(void) {
     nexwatch_magic_t items[] = { {0xBE, "Quadrakey", 0}, {0x88, "Nexkey", 0} };
 
     uint8_t m_idx;
-    for ( m_idx = 0; m_idx < ARRAYLEN(items); m_idx++) {
+    for (m_idx = 0; m_idx < ARRAYLEN(items); m_idx++) {
         items[m_idx].chk = nexwatch_checksum(items[m_idx].magic, cn, calc_parity);
         if (items[m_idx].chk == chk) {
@@ -236,14 +237,14 @@
     }
 
     // output
-    PrintAndLogEx(SUCCESS, " NexWatch raw id : " _YELLOW_("0x%"PRIx32) , rawid);
+    PrintAndLogEx(SUCCESS, " NexWatch raw id : " _YELLOW_("0x%"PRIx32), rawid);
     if (m_idx < ARRAYLEN(items)) {
         PrintAndLogEx(SUCCESS, " fingerprint : " _GREEN_("%s"), items[m_idx].desc);
     }
     PrintAndLogEx(SUCCESS, " 88bit id : " _YELLOW_("%"PRIu32) " (" _YELLOW_("0x%"PRIx32)")", cn, cn);
     PrintAndLogEx(SUCCESS, " mode : %x", mode);
 
-    if ( parity == calc_parity) {
+    if (parity == calc_parity) {
         PrintAndLogEx(SUCCESS, " parity : %s (0x%X)", _GREEN_("ok"), parity);
     } else {
         PrintAndLogEx(WARNING, " parity : %s (0x%X != 0x%X)", _RED_("fail"), parity, calc_parity);
@@ -337,7 +338,7 @@ static int CmdNexWatchClone(const char *Cmd) {
         rawhex[9] |= parity;
         rawhex[10] |= nexwatch_checksum(magic, cn, parity);
     }
-
+
     for (uint8_t i = 1; i < ARRAYLEN(blocks); i++) {
         blocks[i] = bytes_to_num(rawhex + ((i - 1) * 4), sizeof(uint32_t));
     }
diff --git a/client/src/cmdlft55xx.c b/client/src/cmdlft55xx.c
index 48ff6dab2..572a07549 100644
--- a/client/src/cmdlft55xx.c
+++ b/client/src/cmdlft55xx.c
@@ -1301,7 +1301,7 @@ bool tryDetectModulationEx(uint8_t downlink_mode, bool print_config, uint32_t wa
         config.Q5 = tests[0].Q5;
         config.ST = tests[0].ST;
         config.downlink_mode = downlink_mode;
-
+
         if (print_config)
             printConfiguration(config);
diff --git a/client/src/comms.c b/client/src/comms.c
index eb4548ecd..d2ba5436f 100644
--- a/client/src/comms.c
+++ b/client/src/comms.c
@@ -551,7 +551,7 @@ bool OpenProxmark(void *port, bool wait_for_port, int timeout, bool flash_mode,
         do {
            sp = uart_open(portname, speed);
            msleep(500);
-            PrintAndLogEx(INPLACE, "% 3i", timeout - openCount -1);
+            PrintAndLogEx(INPLACE, "% 3i", timeout - openCount - 1);
        } while (++openCount < timeout && (sp == INVALID_SERIAL_PORT || sp == CLAIMED_SERIAL_PORT));
    }